def test_sas_access_file(self):
        # SAS URL is calculated from storage key, so this test runs live only
        if TestMode.need_recording_file(self.test_mode):
            return

        # Arrange
        file_name = self._create_file()
        
        token = self.fs.generate_file_shared_access_signature(
            self.share_name,
            None,
            file_name,
            permission=FilePermissions.READ,
            expiry=datetime.utcnow() + timedelta(hours=1),
        )

        # Act
        service = FileService(
            self.settings.STORAGE_ACCOUNT_NAME,
            sas_token=token,
            request_session=requests.Session(),
        )
        self._set_test_proxy(service, self.settings)
        result = service.get_file_to_bytes(self.share_name, None, file_name)

        # Assert
        self.assertEqual(self.short_byte_data, result.content)
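As a side note, the token does not have to be fed to a second FileService; it can also be appended to a direct file URL. A minimal sketch, assuming `fs`, `token`, `share_name` and `file_name` refer to the objects in the test above (make_file_url is the same helper used in the create_snapshot example further down):

# Sketch only: turn the SAS token into a directly shareable URL.
import requests

sas_url = fs.make_file_url(share_name, None, file_name, sas_token=token)
response = requests.get(sas_url)   # caller needs no account key, only the URL
assert response.status_code == 200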
Example #2
def transfer_fileshare_to_blob(config, fileshare_uri, output_model_name):
    ''' NB -- transfer proceeds via local temporary file! '''
    file_service = FileService(config.storage_account_name,
                               config.storage_account_key)
    blob_service = BlockBlobService(config.storage_account_name,
                                    config.storage_account_key)
    blob_service.create_container(config.container_trained_models)
    blob_service.create_container(config.predictions_container)

    uri_core = fileshare_uri.split('.file.core.windows.net/')[1].split('?')[0]
    fields = uri_core.split('/')
    fileshare = fields.pop(0)
    subdirectory = '/'.join(fields[:-1])
    file_name = '{}/{}'.format(output_model_name, fields[-1])

    with TemporaryFile() as f:
        file_service.get_file_to_stream(share_name=fileshare,
                                        directory_name=subdirectory,
                                        file_name=fields[-1],
                                        stream=f)
        f.seek(0)
        if 'predictions' in fields[-1]:
            blob_service.create_blob_from_stream(
                config.predictions_container,
                '{}_predictions_test_set.csv'.format(output_model_name), f)
        else:
            blob_service.create_blob_from_stream(
                config.container_trained_models, file_name, f)

    return
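A hedged usage sketch for the helper above; the config attributes mirror the ones the function reads, and every value is a placeholder rather than anything from the original project:

# Illustrative call only -- all names and credentials are placeholders.
from types import SimpleNamespace

config = SimpleNamespace(storage_account_name='<account>',
                         storage_account_key='<key>',
                         container_trained_models='trainedmodels',
                         predictions_container='predictions')
fileshare_uri = ('https://<account>.file.core.windows.net/'
                 'myshare/models/model_2021.pkl?<sas>')
transfer_fileshare_to_blob(config, fileshare_uri, 'my_model')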
Example #3
    def test_sas_signed_identifier(self):
        # SAS URL is calculated from storage key, so this test runs live only
        if TestMode.need_recording_file(self.test_mode):
            return

        # Arrange
        file_name = self._create_file()

        access_policy = AccessPolicy()
        access_policy.start = '2011-10-11'
        access_policy.expiry = '2018-10-12'
        access_policy.permission = FilePermissions.READ
        identifiers = {'testid': access_policy}

        resp = self.fs.set_share_acl(self.share_name, identifiers)

        token = self.fs.generate_file_shared_access_signature(
            self.share_name,
            None,
            file_name,
            id='testid'
            )

        # Act
        service = FileService(
            self.settings.STORAGE_ACCOUNT_NAME,
            sas_token=token,
            request_session=requests.Session(),
        )
        self._set_test_proxy(service, self.settings)
        result = service.get_file_to_bytes(self.share_name, None, file_name)

        # Assert
        self.assertEqual(self.short_byte_data, result.content)
Example #4
def main(files):

    configfile = (os.path.dirname(os.path.abspath(__file__)) + '/upload.config')
    print (configfile)

    if not os.path.isfile(configfile):
        print("Settings not found. Please create an upload.config file with the Azure file share account name, access key, file share name, and folder. Each value should be on its own line.")
        exit()
    if len(files) <1:
        print("No files provided for upload. Please supply file paths as arguments.")
        exit()

    # get settings - account, key, share, and folder in subsequent lines
    with open(configfile,"r") as config:
        settings = config.readlines()
        azure_account = settings[0].rstrip()
        azure_key = settings[1].rstrip()
        share = settings[2].rstrip()
        folder = settings[3].rstrip()

    file_service = FileService(account_name=azure_account, account_key=azure_key)

    # Arguments should just be an array of filenames.
    timestamp_suffix = datetime.now().strftime("%Y%m%d-%H%M_")
    for file in files:
        if not os.path.isfile(file):
            print(file, "not found")
        else:
            print("Uploading:", file)
            stampedfile = timestamp_suffix + os.path.basename(file)
            file_service.create_file_from_path(share, folder, stampedfile, file, progress_callback=progress)
            print(stampedfile," uploaded")
Example #5
    def file_sas(self):
        share_name = self._create_share()
        self.service.create_directory(share_name, 'dir1')
        self.service.create_file_from_text(share_name, 'dir1', 'file1', b'hello world')

        # Read access only to this particular file
        # Expires in an hour
        token = self.service.generate_file_shared_access_signature(
            share_name,
            'dir1',
            'file1',
            FilePermissions.READ,
            datetime.utcnow() + timedelta(hours=1),
        )

        # Create a service and use the SAS
        sas_service = FileService(
            account_name=self.account.account_name,
            sas_token=token,
        )

        file = sas_service.get_file_to_text(share_name, 'dir1', 'file1')
        content = file.content  # hello world

        self.service.delete_share(share_name)
Example #6
    def test_sas_signed_identifier(self):
        # SAS URL is calculated from storage key, so this test runs live only
        if TestMode.need_recording_file(self.test_mode):
            return

        # Arrange
        file_name = self._create_file()

        access_policy = AccessPolicy()
        access_policy.start = '2011-10-11'
        access_policy.expiry = '2018-10-12'
        access_policy.permission = FilePermissions.READ
        identifiers = {'testid': access_policy}

        resp = self.fs.set_share_acl(self.share_name, identifiers)

        token = self.fs.generate_file_shared_access_signature(self.share_name,
                                                              None,
                                                              file_name,
                                                              id='testid')

        # Act
        service = FileService(
            self.settings.STORAGE_ACCOUNT_NAME,
            sas_token=token,
            request_session=requests.Session(),
        )
        self._set_test_proxy(service, self.settings)
        result = service.get_file_to_bytes(self.share_name, None, file_name)

        # Assert
        self.assertEqual(self.short_byte_data, result.content)
Example #7
    def sas_with_signed_identifiers(self):
        share_name = self._create_share()
        self.service.create_directory(share_name, 'dir1')
        self.service.create_file_from_text(share_name, 'dir1', 'file1', b'hello world')

        # Set access policy on share
        access_policy = AccessPolicy(permission=SharePermissions.READ,
                                     expiry=datetime.utcnow() + timedelta(hours=1))
        identifiers = {'id': access_policy}
        acl = self.service.set_share_acl(share_name, identifiers)

        # Wait 30 seconds for acl to propagate
        time.sleep(30)

        # Indicates to use the access policy set on the share
        token = self.service.generate_share_shared_access_signature(
            share_name,
            id='id'
        )

        # Create a service and use the SAS
        sas_service = FileService(
            account_name=self.account.account_name,
            sas_token=token,
        )

        file = sas_service.get_file_to_text(share_name, 'dir1', 'file1')
        content = file.content  # hello world

        self.service.delete_share(share_name)
Example #8
 def file_storage_connect(self):
     self.file_storage_url = self.get_property('fs_server', 'general')
     self.file_storage_user = self.get_property('fs_username', 'general')
     self.file_storage_pwd = self.get_property('fs_password', 'general')
     self.file_storage_share = self.get_property('fs_share', 'general')
     self.file_storage_dir = self.get_property('fs_directory_prefix',
                                               'general')
     self.file_service = FileService(account_name=self.file_storage_user,
                                     account_key=self.file_storage_pwd)
     try:
         if self.file_service.exists(self.file_storage_share):
             print(
                 'Connection to Azure file storage successfully established...'
             )
             if len(self.file_storage_dir) > 0 and not self.file_service.exists(
                     self.file_storage_share,
                     directory_name=self.file_storage_dir):
                 subdirs = self.file_storage_dir.split('/')
                 subdirfull = ""
                 for subdir in subdirs:
                     subdirfull += subdir
                     self.file_service.create_directory(
                         self.file_storage_share, subdirfull)
                     subdirfull += "/"
                 print('Created directory:' + self.file_storage_dir)
         else:
             print(
                 'Failed to connect to Azure file storage, share does not exist: '
                 + self.file_storage_share)
     except Exception as ex:
         print('Error connecting to Azure file storage: ', ex)
Example #9
    def test_sas_access_file(self):
        # SAS URL is calculated from storage key, so this test runs live only
        if TestMode.need_recording_file(self.test_mode):
            return

        # Arrange
        file_name = self._create_file()

        token = self.fs.generate_file_shared_access_signature(
            self.share_name,
            None,
            file_name,
            permission=FilePermissions.READ,
            expiry=datetime.utcnow() + timedelta(hours=1),
        )

        # Act
        service = FileService(
            self.settings.STORAGE_ACCOUNT_NAME,
            sas_token=token,
            request_session=requests.Session(),
        )
        self._set_test_proxy(service, self.settings)
        result = service.get_file_to_bytes(self.share_name, None, file_name)

        # Assert
        self.assertEqual(self.short_byte_data, result.content)
Example #10
def shares():
    # Create Container and Share
    global storage_account_key, blob_service, blob_share, file_service, file_share
    sak = storage_client.storage_accounts.list_keys(resourcegroupname,
                                                    storageaccountname)
    storage_account_key = sak.keys[0].value
    cloudstorage_client = CloudStorageAccount(storageaccountname,
                                              storage_account_key)
    blob_service = cloudstorage_client.create_block_blob_service()
    blob_share = blob_service.create_container(
        sharename, public_access=PublicAccess.Container)
    file_service = FileService(account_name=storageaccountname,
                               account_key=storage_account_key)
    file_share = file_service.create_share(sharename)
    # Copy Setup Files to Container and Share
    blob_service.create_blob_from_path(
        sharename,
        filename,
        filename,
    )
    file_service.create_file_from_path(
        sharename,
        '',
        filename,
        filename,
    )
Example #11
 def delete(self, remote_file):
     """Delete file from the cloud. The azure url format is https://myaccount.blob.core.windows.net/mycontainer/myblob.
      Args:
          remote_file(str): The path of the file to be deleted.
      Raises:
          :exc:`~..DriverError`: if the given path is not an Azure storage URL.
     """
     if 'core.windows.net' not in remote_file:
         self.logger.error(
             "Source or destination must be an Azure storage URL (format: "
             "https://myaccount.blob.core.windows.net/mycontainer/myblob)")
         raise DriverError
     parse_url = _parse_url(remote_file)
     key = self.storage_client.storage_accounts.list_keys(
         self.resource_group_name, parse_url.account).keys[0].value
     if parse_url.file_type == 'blob':
         bs = BlockBlobService(account_name=parse_url.account,
                               account_key=key)
         return bs.delete_blob(parse_url.container_or_share_name,
                               parse_url.file)
     elif parse_url.file_type == 'file':
         fs = FileService(account_name=parse_url.account, account_key=key)
         return fs.delete_file(parse_url.container_or_share_name,
                               parse_url.path, parse_url.file)
     else:
         raise ValueError(
             "This azure storage type is not valid. It should be blob or file."
         )
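The `_parse_url` helper is not part of this example; below is a minimal sketch of the shape the delete method appears to rely on, with the field names taken from the call sites above and everything else assumed:

# Assumed helper -- not the original implementation.
from collections import namedtuple
from urllib.parse import urlparse

ParsedUrl = namedtuple('ParsedUrl',
                       'account file_type container_or_share_name path file')

def _parse_url(url):
    parts = urlparse(url)
    # 'myaccount.blob.core.windows.net' -> account='myaccount', file_type='blob'
    account, file_type = parts.netloc.split('.')[:2]
    segments = parts.path.lstrip('/').split('/')
    return ParsedUrl(account, file_type,
                     segments[0],                  # container or share name
                     '/'.join(segments[1:-1]),     # directory portion, may be ''
                     segments[-1])                 # file name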
Example #12
    def sas_with_signed_identifiers(self):
        share_name = self._create_share()
        self.service.create_directory(share_name, 'dir1')
        self.service.create_file_from_text(share_name, 'dir1', 'file1',
                                           b'hello world')

        # Set access policy on share
        access_policy = AccessPolicy(permission=SharePermissions.READ,
                                     expiry=datetime.utcnow() +
                                     timedelta(hours=1))
        identifiers = {'id': access_policy}
        acl = self.service.set_share_acl(share_name, identifiers)

        # Wait 30 seconds for acl to propagate
        time.sleep(30)

        # Indicates to use the access policy set on the share
        token = self.service.generate_share_shared_access_signature(share_name,
                                                                    id='id')

        # Create a service and use the SAS
        sas_service = FileService(
            account_name=self.account.account_name,
            sas_token=token,
        )

        file = sas_service.get_file_to_text(share_name, 'dir1', 'file1')
        content = file.content  # hello world

        self.service.delete_share(share_name)
Example #13
    def move_to_storage_account(self, file, storage='blob'):
        from hugme.__key__ import acc, key
        from datetime import datetime

        print(datetime.now().strftime("%Y-%m-%d %H:%M:%S"),
              'Moving final file to {} storage...'.format(storage))

        if storage == 'blob':
            from azure.storage.blob import BlockBlobService, PublicAccess

            block_blob_service = BlockBlobService(account_name=acc,
                                                  account_key=key)
            block_blob_service.set_container_acl(
                'final', public_access=PublicAccess.Container)
            block_blob_service.create_blob_from_path(
                container_name='consumidorgov',
                blob_name='comparativo',
                file_path='consumidor_gov\\data\\' + file,
            )

        elif storage == 'files':
            from azure.storage.file import FileService

            file_service = FileService(account_name=acc, account_key=key)
            file_service.create_file_from_path(
                share_name='complains',
                directory_name='hugme',
                file_name='base.csv',
                local_file_path='' + file,
            )

        else:
            return False
Example #14
    def file_sas(self):
        share_name = self._create_share()
        self.service.create_directory(share_name, 'dir1')
        self.service.create_file_from_text(share_name, 'dir1', 'file1',
                                           b'hello world')

        # Read access only to this particular file
        # Expires in an hour
        token = self.service.generate_file_shared_access_signature(
            share_name,
            'dir1',
            'file1',
            FilePermissions.READ,
            datetime.utcnow() + timedelta(hours=1),
        )

        # Create a service and use the SAS
        sas_service = FileService(
            account_name=self.account.account_name,
            sas_token=token,
        )

        file = sas_service.get_file_to_text(share_name, 'dir1', 'file1')
        content = file.content  # hello world

        self.service.delete_share(share_name)
Example #15
def getLatestModel(customer, modelName, storage_account_name,
                   storage_account_key):
    fileService = FileService(account_name=storage_account_name,
                              account_key=storage_account_key)
    if fileService.exists('trainedmodels', customer):
        modelTimestampArr = []
        files = fileService.list_directories_and_files('trainedmodels',
                                                       customer + '/' +
                                                       modelName,
                                                       prefix=modelName)

        for file in files:
            date = file.name.split('.')[0].split('_')[1]
            modelTimestampArr.append(date)

        latestModelFileName = modelName + '_' + max(modelTimestampArr) + '.pkl'
        print(latestModelFileName)

        file = fileService.get_file_to_bytes('trainedmodels',
                                             customer + '/' + modelName,
                                             latestModelFileName)
        model = pickle.loads(file.content)['model']
        return model
    else:
        print('Customer or model not found.')
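A hedged usage sketch; the customer, model name and credentials below are placeholders:

# Illustrative call only.
model = getLatestModel('contoso', 'churn', '<storage-account>', '<storage-key>')
if model is not None:
    print(type(model))   # e.g. a fitted scikit-learn estimator, per the pickled dict above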
Example #16
def create_snapshot(file_share, directory_name, file_name, container_name, correlation_guid=None):
    # Evaluate the default per call; a str(uuid.uuid4()) default would be computed only once at import time.
    correlation_guid = correlation_guid or str(uuid.uuid4())
    file_service = FileService(account_name=STORAGE_ACCOUNT_NAME, account_key=STORAGE_ACCOUNT_KEY)
    blob_service = BlockBlobService(account_name=STORAGE_ACCOUNT_NAME, account_key=STORAGE_ACCOUNT_KEY)
    file_sas_token = file_service.generate_file_shared_access_signature(
        file_share,
        directory_name,
        file_name,
        permission = FilePermissions.READ,
        expiry = datetime.now() + timedelta(minutes = 10))

    file_url = file_service.make_file_url(file_share, directory_name, file_name, sas_token = file_sas_token)

    blob_name = '{0}/{1}/{2}'.format(correlation_guid, directory_name, file_name)
    blob_service.create_container(container_name)

    try:
        blob_service.copy_blob(container_name, blob_name, file_url)
    except Exception as e:
        raise ValueError('Missing file ' + file_name)

    blob_sas_token = blob_service.generate_blob_shared_access_signature(
        container_name,
        blob_name,
        permission = BlobPermissions.READ,
        expiry = datetime.now() + timedelta(days = 1000))

    return blob_service.make_blob_url(container_name, blob_name, sas_token = blob_sas_token)
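A hedged usage sketch; the share, directory, file and container names are placeholders:

# Illustrative call only.
snapshot_url = create_snapshot('myshare', 'runs/2021-06-01', 'metrics.csv',
                               'snapshots')
print(snapshot_url)   # read-only blob URL (SAS valid ~1000 days per the code above)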
Example #17
def create_and_attach_file_storage(cfg, ws):
    if len(cfg.DataReference.localDirectoryFilesList) > 0:
        for ref in cfg.DataReference.localDirectoryFilesList:
            log.info("Attempting to create file share '%s' on storage account '%s'.", ref.remoteFileShare, ref.storageAccountName)
            file_service = FileService(ref.storageAccountName, ref.storageAccountKey)
            exist = file_service.create_share(ref.remoteFileShare, fail_on_exist=False)
            if exist:
                log.info("File Share '%s' on storage account '%s' created.", ref.remoteFileShare, ref.storageAccountName)
            else:
                log.info("File Share '%s' on storage account '%s' already existed.", ref.remoteFileShare, ref.storageAccountName)
            # Get most recent list of datastores linked to current workspace
            datastores = ws.datastores()
            # Validate if share_ds is created
            ds = None if ref.dataref_id not in datastores else Datastore(workspace = ws, name = ref.dataref_id)
            # Register the DS to the workspace
            if ds:
                if ds.account_name == ref.storageAccountName and ds.container_name == ref.remoteFileShare:
                    recreate = False
                else:
                    recreate = True
                    # also remove the existing reference
                    ds.unregister()
            else:
                recreate = True
            if recreate:
                log.info('Registering file share "{}" to AML datastore for AML workspace "{}" under datastore id "{}".'.format(ref.remoteFileShare, ws.name, ref.dataref_id))
                ds = Datastore.register_azure_file_share(workspace = ws,
                                                    datastore_name = ref.dataref_id, 
                                                    file_share_name = ref.remoteFileShare, 
                                                    account_name = ref.storageAccountName, 
                                                    account_key= ref.storageAccountKey,
                                                    overwrite=True,
                                                    )
            else:
                log.info('File share "{}" under AML workspace "{}" already registered under datastore id "{}".'.format(ref.remoteFileShare, ws.name, ref.dataref_id))
Example #18
File: main.py  Project: ilovecee/ef
def delete_result(filename):
  AzureStorageAccount = 'effiles'
  key = 'axLykwdLsUwKTDY5flU6ivGrt9obV38k2UMVDCSpLYE3K6jAkwsjWOThQydhuMSWHfx6lTq102gdkas/GyKhEA=='
  down_path = 'results'
  path1 = 'efficientfrontier'
  file_service = FileService(account_name = AzureStorageAccount, account_key = key)
  file_service.delete_file (path1, down_path, filename)
  # Create Cursor
  cur = mysql.connection.cursor()

  # Execute
  cur.execute ('DELETE FROM result_files WHERE filename = %s', [filename])

  # Commit to DB
  mysql.connection.commit()

  # Close connection
  cur.close()

  target = os.path.join(APP_ROOT, 'results/')
  destination = '/'.join([target, filename])
  
  if os.path.exists(destination):
    os.remove(destination)


  flash ('File Deleted', 'success')
    
  return redirect(url_for('results'))
Example #19
def _configure_auto_storage(cli_ctx, location):
    """Configures auto storage account for the cluster

    :param str location: location for the auto-storage account.
    :return (str, str): a tuple with auto storage account name and key.
    """
    from azure.mgmt.resource.resources.models import ResourceGroup
    from azure.storage.file import FileService
    from azure.storage.blob import BlockBlobService
    resource_group = _get_auto_storage_resource_group()
    resource_client = get_mgmt_service_client(cli_ctx, ResourceType.MGMT_RESOURCE_RESOURCES)
    if resource_client.resource_groups.check_existence(resource_group):
        logger.warning('BatchAI will use existing %s resource group for auto-storage account',
                       resource_group)
    else:
        logger.warning('Creating %s resource for auto-storage account', resource_group)
        resource_client.resource_groups.create_or_update(
            resource_group, ResourceGroup(location=location))
    storage_client = _get_storage_management_client(cli_ctx)
    account = None
    for a in storage_client.storage_accounts.list_by_resource_group(resource_group):
        if a.primary_location == location.lower().replace(' ', ''):
            account = a.name
            logger.warning('Using existing %s storage account as an auto-storage account', account)
            break
    if account is None:
        account = _create_auto_storage_account(storage_client, resource_group, location)
        logger.warning('Created auto storage account %s', account)
    key = _get_storage_account_key(cli_ctx, account, None)
    file_service = FileService(account, key)
    file_service.create_share(AUTO_STORAGE_SHARE_NAME, fail_on_exist=False)
    blob_service = BlockBlobService(account, key)
    blob_service.create_container(AUTO_STORAGE_CONTAINER_NAME, fail_on_exist=False)
    return account, key
Example #20
    def create_file_share(self, storage_account_name: str, share_name: str,
                          size: int, key: str) -> FileService:
        self.logger.info("Creating file share")
        file_service = FileService(account_name=storage_account_name,
                                   account_key=key)
        file_service.create_share(share_name, quota=size)

        return file_service
Example #21
 def create_share_name(self, remote_folder):
     parse_url = _parse_url(remote_folder)
     key = self.storage_client.storage_accounts.list_keys(
         self.resource_group_name, parse_url.account).keys[0].value
     fs = FileService(account_name=parse_url.account, account_key=key)
     return fs.create_directory(
         share_name=parse_url.container_or_share_name,
         directory_name=parse_url.path)
Example #22
 def __init__(self):
     # fetch config data
     conf = Configuration()
     # create Azure File share service
     self.file_service = FileService(
         account_name=conf.account_name, account_key=conf.account_key)
     # set azure share file name (container)
     self.file_share = conf.file_share
Example #23
    def __init__(self, ac, key, fileshare):
        self.account_name = ac
        self.account_key = key
        self.fileshare_name = fileshare

        #Create a FileService that is used to call the File Service for the storage account
        self.file_service = FileService(account_name=ac, account_key=key)

        return
Example #24
def initialize(folder, filename, share="models-share") -> rt.InferenceSession:
    file_service = FileService(account_name=FILE_ACCOUNT_NAME, account_key=FILE_ACCOUNT_KEY)

    onnx_file = file_service.get_file_to_bytes(share, folder, filename)

    inference_session = rt.InferenceSession(onnx_file.content)
    print("Inference session initialized")

    return inference_session
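A short, hedged sketch of driving the returned session with onnxruntime; the share/folder paths, input name and input shape are assumptions about the model, not part of the original example:

# Illustrative inference call.
import numpy as np

session = initialize('my-model-folder', 'model.onnx')   # placeholder share paths
input_name = session.get_inputs()[0].name               # model-dependent
dummy = np.zeros((1, 4), dtype=np.float32)              # assumed input shape
outputs = session.run(None, {input_name: dummy})
print(outputs[0])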
Example #25
def create_azure_fileshare(share_prefix, account_name, account_key):
    """
    Generate a unique share name to avoid overlaps in shared infra
    :param share_prefix:
    :param account_name:
    :param account_key:
    :return:
    """

    # FIXME - Need to remove hardcoded directory path below

    d_dir = './WebInDeploy/bootstrap'
    share_name = "{0}-{1}".format(share_prefix.lower(), str(uuid.uuid4()))
    print('using share_name of: {}'.format(share_name))

    # archive_file_path = _create_archive_directory(files, share_prefix)

    try:
        # ignore SSL warnings - bad form, but SSL Decrypt causes issues with this
        s = requests.Session()
        s.verify = False

        file_service = FileService(account_name=account_name,
                                   account_key=account_key,
                                   request_session=s)

        # print(file_service)
        if not file_service.exists(share_name):
            file_service.create_share(share_name)

        for d in ['config', 'content', 'software', 'license']:
            print('creating directory of type: {}'.format(d))
            if not file_service.exists(share_name, directory_name=d):
                file_service.create_directory(share_name, d)

            # FIXME - We only handle bootstrap files.  May need to handle other dirs

            if d == 'config':
                for filename in os.listdir(d_dir):
                    print('creating file: {0}'.format(filename))
                    file_service.create_file_from_path(
                        share_name, d, filename, os.path.join(d_dir, filename))

    except AttributeError as ae:
        # this can be returned on bad auth information
        print(ae)
        return "Authentication or other error creating bootstrap file_share in Azure"

    except AzureException as ahe:
        print(ahe)
        return str(ahe)
    except ValueError as ve:
        print(ve)
        return str(ve)

    print('all done')
    return share_name
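A hedged usage sketch; the prefix and credentials are placeholders:

# Illustrative call only.
share = create_azure_fileshare('webindeploy', '<storage-account>', '<storage-key>')
if share.startswith('webindeploy-'):
    print('bootstrap share ready:', share)
else:
    print('error:', share)   # the helper returns an error string on failure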
Example #26
 def __init__(self, local_root: Path, afs_creds: dict = None):
     if afs_creds is None:
         afs_creds = get_afs_creds()
     self.afs_name = afs_creds["AFS_NAME"]
     self.afs_key = afs_creds["AFS_KEY"]
     self.afs_share = afs_creds["AFS_SHARE"]
     self.file_service = FileService(account_name=self.afs_name,
                                     account_key=self.afs_key)
     self.local_root = Path(local_root)
Example #27
def _create_file_share(storage_account, storage_account_key):
    """Creates Azure Files in the storage account to be mounted into a cluster

    :param str storage_account: name of the storage account.
    :param str storage_account_key: storage account key.
    """
    if storage_account == FAKE_STORAGE.name:
        return
    service = FileService(storage_account, storage_account_key)
    service.create_share(AZURE_FILES_NAME)
Example #28
def file_storage_connect():
    file_service = FileService(account_name=file_storage_user, account_key=file_storage_pwd, socket_timeout=15)
    try:
        if file_service.exists(file_storage_share):
            print('Connection to Azure file storage successfully established...')
        else:
            print('Failed to connect to Azure file storage, share does not exist: ' + file_storage_share)
    except Exception as ex:
        print('Error connecting to Azure file storage: ', ex)
    return file_service
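The helper reads module-level settings that are not shown; a hedged sketch of wiring it up and pulling a file once the connection check passes (all values are placeholders):

# Illustrative setup only.
file_storage_user = '<storage-account>'
file_storage_pwd = '<storage-key>'
file_storage_share = 'myshare'

service = file_storage_connect()
service.get_file_to_path(file_storage_share, None, 'report.csv', '/tmp/report.csv')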
Example #29
    def _create_file_share(storage_account, storage_account_key):
        """Creates Azure Files in the storage account to be mounted into a cluster

        :param str storage_account: name of the storage account.
        :param str storage_account_key: storage account key.
        """
        if storage_account == Helpers.FAKE_STORAGE.name:
            return
        service = FileService(storage_account, storage_account_key)
        service.create_share(Helpers.AZURE_FILES_NAME)
Example #30
 def __init__(self, config, metadata):
     super(S3AzureFileBypass, self).__init__(config, metadata)
     from azure.storage.file import FileService
     self.azure_service = FileService(
         self.read_option('writer', 'account_name'),
         self.read_option('writer', 'account_key'))
     self.share = self.read_option('writer', 'share')
     self.filebase_path = self._format_filebase_path(
         self.read_option('writer', 'filebase'))
     self._ensure_path(self.filebase_path)
Example #31
def upload_scripts(config, job_name, filenames):
    service = FileService(config.storage_account['name'],
                          config.storage_account['key'])
    service.create_directory(config.fileshare_name,
                             job_name,
                             fail_on_exist=False)
    transfer_file = lambda fname: service.create_file_from_path(
        config.fileshare_name, job_name, os.path.basename(fname), fname)
    for filename in filenames:
        transfer_file(filename)
Example #32
def run(job, **kwargs):
    resource = kwargs.get('resource')
    create_custom_fields_as_needed()

    storage_account = '{{ storage_account }}'
    file = "{{ file }}"
    azure_storage_file_share_name = '{{ azure_storage_file_share_name }}'
    file_name = Path(file).name
    if file.startswith(settings.MEDIA_URL):
        set_progress("Converting relative URL to filesystem path")
        file = file.replace(settings.MEDIA_URL, settings.MEDIA_ROOT)

    account_key = Resource.objects.filter(name__icontains='{{ storage_account }}')[0].azure_account_key
    fallback_account_key = Resource.objects.filter(name__icontains="{{ storage_account }}")[0].azure_account_key_fallback

    set_progress("Connecting To Azure...")
    file_service = FileService(account_name=storage_account, account_key=account_key)

    set_progress('Creating a file share...')
    file_service.create_share(share_name=azure_storage_file_share_name, quota=1)

    set_progress('Creating a file...')
    if file_service.exists(share_name=azure_storage_file_share_name, file_name=file_name, directory_name=''):
        file_service.create_file_from_path(share_name=azure_storage_file_share_name, file_name=file_name, directory_name='', local_file_path=file)
        return "WARNING", "File with this name already exists", "The file will be updated."
    else:
        file_service.create_file_from_path(share_name=azure_storage_file_share_name, file_name=file_name, directory_name='', local_file_path=file)
        resource.name = azure_storage_file_share_name + '-' + file_name
        resource.azure_storage_account_name = storage_account
        resource.azure_account_key = account_key
        resource.azure_account_key_fallback = fallback_account_key
        resource.azure_storage_file_share_name = azure_storage_file_share_name
        resource.azure_storage_file_name = file_name
        resource.save()
    return "Success", "The File has succesfully been uploaded", ""
Example #33
class S3AzureFileBypass(BaseS3Bypass):
    """
    Bypass executed by default when data source is an S3 bucket and data destination
    is an Azure share.
    It should be transparent to the user. Conditions are:

        - S3Reader and AzureFileWriter are used on configuration.
        - No filter modules are set up.
        - No transform module is set up.
        - No grouper module is set up.
        - AzureFileWriter does not have an items_limit set in the configuration.
        - AzureFileWriter uses the default items_per_buffer_write and size_per_buffer_write values.
    """

    def __init__(self, config, metadata):
        super(S3AzureFileBypass, self).__init__(config, metadata)
        from azure.storage.file import FileService
        self.azure_service = FileService(
            self.read_option('writer', 'account_name'),
            self.read_option('writer', 'account_key'))
        self.share = self.read_option('writer', 'share')
        self.filebase_path = self._format_filebase_path(self.read_option('writer', 'filebase'))
        self._ensure_path(self.filebase_path)

    @classmethod
    def meets_conditions(cls, config):
        if not config.writer_options['name'].endswith('AzureFileWriter'):
            cls._log_skip_reason('Wrong writer configured')
            return False
        return super(S3AzureFileBypass, cls).meets_conditions(config)

    def _format_filebase_path(self, filebase):
        filebase_with_date = datetime.datetime.now().strftime(filebase)
        # warning: we strip file prefix here, could be unexpected
        filebase_path, prefix = os.path.split(filebase_with_date)
        return filebase_path

    def _ensure_path(self, filebase):
        path = filebase.split('/')
        folders_added = []
        for sub_path in path:
            folders_added.append(sub_path)
            parent = '/'.join(folders_added)
            self.azure_service.create_directory(self.share, parent)

    @retry_long
    def _copy_s3_key(self, key):
        file_name = key.name.split('/')[-1]
        self.azure_service.copy_file(
            self.share,
            self.filebase_path,
            file_name,
            key.generate_url(S3_URL_EXPIRES_IN)
        )
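For orientation, here is a rough sketch of an exporters configuration that would satisfy the conditions in the docstring above; only the option names used by read_option are grounded, while the module paths and overall schema are assumptions:

# Assumed configuration shape -- illustrative only.
config = {
    'reader': {'name': 'exporters.readers.s3_reader.S3Reader',
               'options': {'bucket': 'my-bucket', 'prefix': 'exports/'}},
    'writer': {'name': 'exporters.writers.azure_file_writer.AzureFileWriter',
               'options': {'account_name': '<account>',
                           'account_key': '<key>',
                           'share': 'myshare',
                           'filebase': 'exports/%Y-%m-%d/'}},
    # no filter, transform or grouper modules and no items_limit,
    # so meets_conditions() can return True
}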
Example #34
def num2():
    file = request.form['name']
    file += ".jpg"
    file_service = FileService(
        account_name='mystorge',
        account_key=
        '0T4f/dzyV7AIw4a9bevK5ysML0qP55CEWEqJyJWXyr6fKRxowLq8tL7mep/MfSc//mcQggeH1+K79A4HUDug3w=='
    )
    filename = 'out.jpg'
    file_service.get_file_to_path('image1', None, file, filename)
    return send_file(filename)
Example #35
 def __init__(self, options, meta, *args, **kw):
     from azure.storage.file import FileService
     super(AzureFileWriter, self).__init__(options, meta, *args, **kw)
     account_name = self.read_option('account_name')
     account_key = self.read_option('account_key')
     self.azure_service = FileService(account_name, account_key)
     self.share = self.read_option('share')
     self.azure_service.create_share(self.share)
     self.logger.info('AzureWriter has been initiated. '
                      'Writing to share {}'.format(self.share))
     self.set_metadata('files_counter', Counter())
     self.set_metadata('files_written', [])
Example #36
File: main.py  Project: ilovecee/ef
def download_result(filename):
  AzureStorageAccount = 'effiles'
  key = 'axLykwdLsUwKTDY5flU6ivGrt9obV38k2UMVDCSpLYE3K6jAkwsjWOThQydhuMSWHfx6lTq102gdkas/GyKhEA=='
  down_path = 'results'
  path1 = 'efficientfrontier'
  file_service = FileService(account_name = AzureStorageAccount, account_key = key)
  target = os.path.join(APP_ROOT, 'results/')
  destination = '/'.join([target, filename])
  file_service.get_file_to_path (path1, down_path, filename, destination)

  
  return send_file(destination, attachment_filename = filename)
Example #37
def assert_file_in_file_share(test, storage_account, storage_account_key, directory, filename, expected_content):
    """Checks if there is a file with given name and content exists in the Azure File share.

    :param AzureMgmtTestCase test: test instance.
    :param str storage_account: storage account name.
    :param str storage_account_key: storage account key.
    :param str directory: folder.
    :param str filename: filename.
    :param unicode expected_content: expected content.
    """
    if not test.is_live:
        return
    service = FileService(storage_account, storage_account_key)
    actual = service.get_file_to_text(AZURE_FILES_NAME, directory, filename).content
    test.assertEqual(expected_content, actual)
Example #38
 def __init__(self, config, metadata):
     super(S3AzureFileBypass, self).__init__(config, metadata)
     from azure.storage.file import FileService
     self.azure_service = FileService(
         self.read_option('writer', 'account_name'),
         self.read_option('writer', 'account_key'))
     self.share = self.read_option('writer', 'share')
     self.filebase_path = self._format_filebase_path(self.read_option('writer', 'filebase'))
     self._ensure_path(self.filebase_path)
Example #39
 def openGFWListFile(self):
     azureFileService = FileService(account_name=self.azureAccountName, account_key=self.azureAccountKey)
     # the following snippet creates a file share and a directory
     # azureFileService.create_share('myshare')
     # azureFileService.create_directory('myshare', 'GFWListEditor')
     
     # this scans the file share for all directories
     # generator = azureFileService.list_directories_and_files('myshare')
     # for fileOrDir in generator:
     #    print(fileOrDir.name)
     
     # this uploads a file to the target directory
     # azureFileService.create_file_from_path('myshare', 'GFWListEditor', self.gfwlistFile.rsplit('/', 1)[1], self.gfwlistFile) 
     
     # this downloads a file to a stream
     gfwlistFileStream = BytesIO()
     azureFileService.get_file_to_stream(self.azureFileShareName, self.azureFileShareFileDir, self.azureFileShareFileName, gfwlistFileStream)
     gfwlistFileStream.seek(0)
     return TextIOWrapper(gfwlistFileStream)
Example #40
 def cleanupBackups(self):
     result = messagebox.askquestion('You are about to clean up the backups', 'Are you sure?', icon='warning')
     if result == 'yes':
         # files = []
         # for (dirpath, dirname, filenames) in walk(self.gfwlistFileDir):
         #     files.extend(filenames)
         # for f in files:
         #     print (f)
         
         # the following is for cleaning up files locally
         # bkups = glob.glob(os.path.join(self.gfwlistFileDir, '*.bk'))
         # for f in bkups[:len(bkups)-1]:
         #     os.remove(f)
         
         azureFileService = FileService(account_name=self.azureAccountName, account_key=self.azureAccountKey)
         generator = azureFileService.list_directories_and_files(self.azureFileShareName, self.azureFileShareFileDir)
         for fileOrDir in generator:
             if (fileOrDir.name.endswith('.bk')):
                 azureFileService.delete_file(self.azureFileShareName, self.azureFileShareFileDir, fileOrDir.name)
Example #41
    def account_sas(self):
        share_name = self._create_share()
        metadata = {'val1': 'foo', 'val2': 'blah'}
        self.service.set_share_metadata(share_name, metadata=metadata)

        # Access to read operations on the shares themselves
        # Expires in an hour
        token = self.service.generate_account_shared_access_signature(
            ResourceTypes.CONTAINER,
            AccountPermissions.READ,
            datetime.utcnow() + timedelta(hours=1),
        )

        # Create a service and use the SAS
        sas_service = FileService(
            account_name=self.account.account_name,
            sas_token=token,
        )
        metadata = sas_service.get_share_metadata(share_name)  # metadata={'val1': 'foo', 'val2': 'blah'}

        self.service.delete_share(share_name)
Example #42
    def saveChanges(self):
        if (self.currentState != Application.DONE_SCANNING):
            return

        gfwlistBackupFile = self.azureFileShareFileName + '.' + datetime.datetime.now().strftime("%Y-%m-%d-%H-%M-%S" + '.bk')
        # messagebox.showinfo('You are about to save the changes', 'Are you sure?')
        result = messagebox.askquestion('You are about to save the changes', 'Are you sure?', icon='warning')
        if result == 'yes':
            # this is for copying file locally: copyfile(self.gfwlistFile, gfwlistBackupFile)
            azureFileService = FileService(account_name=self.azureAccountName, account_key=self.azureAccountKey)
            sourceUrl = 'https://%s.%s/%s/%s/%s' % (self.azureAccountName, self.azureFileServiceDomain, self.azureFileShareName, self.azureFileShareFileDir, self.azureFileShareFileName)
            azureFileService.copy_file(self.azureFileShareName, self.azureFileShareFileDir, gfwlistBackupFile, sourceUrl)
            # the following is for writing the file locally
            with open(self.gfwlistFile, 'w') as f:
                f.write(self.sectionBeforeRules.getvalue())
                f.write(',\n'.join('  "' + str(e) + '"' for e in self.listBox.get(0, END)))
                f.write('\n')
                f.write(self.sectionAfterRules.getvalue())
            
            # then write it to the file share
            azureFileService.create_file_from_path(self.azureFileShareName, self.azureFileShareFileDir, self.azureFileShareFileName, self.gfwlistFile)
Example #43
    def __init__(self, options, meta, *args, **kw):
        from azure.storage.file import FileService

        super(AzureFileWriter, self).__init__(options, meta, *args, **kw)
        account_name = self.read_option("account_name")
        account_key = self.read_option("account_key")
        self.azure_service = FileService(account_name, account_key)
        self.share = self.read_option("share")
        self.azure_service.create_share(self.share)
        self.logger.info("AzureWriter has been initiated." "Writing to share {}".format(self.share))
        self.set_metadata("files_counter", Counter())
        self.set_metadata("files_written", [])
Example #44
    def share_sas(self):
        share_name = self._create_share()
        self.service.create_file_from_text(share_name, None, 'file1', b'hello world')

        # Access only to the files in the given share
        # Read permissions to access files
        # Expires in an hour
        token = self.service.generate_share_shared_access_signature(
            share_name,
            SharePermissions.READ,
            datetime.utcnow() + timedelta(hours=1),
        )

        # Create a service and use the SAS
        sas_service = FileService(
            account_name=self.account.account_name,
            sas_token=token,
        )

        file = sas_service.get_file_to_text(share_name, None, 'file1')
        content = file.content  # hello world

        self.service.delete_share(share_name)
Example #45
    def test_job_level_mounting(self, resource_group, location, cluster, storage_account, storage_account_key):
        """Tests if it's possible to mount external file systems for a job."""
        job_name = 'job'

        # Create file share and container to mount on the job level
        if storage_account.name != helpers.FAKE_STORAGE.name:
            files = FileService(storage_account.name, storage_account_key)
            files.create_share('jobshare', fail_on_exist=False)
            blobs = BlockBlobService(storage_account.name, storage_account_key)
            blobs.create_container('jobcontainer', fail_on_exist=False)

        job = self.client.jobs.create(
            resource_group.name,
            job_name,
            parameters=models.JobCreateParameters(
                location=location,
                cluster=models.ResourceId(id=cluster.id),
                node_count=1,
                mount_volumes=models.MountVolumes(
                    azure_file_shares=[
                        models.AzureFileShareReference(
                            account_name=storage_account.name,
                            azure_file_url='https://{0}.file.core.windows.net/{1}'.format(
                                storage_account.name, 'jobshare'),
                            relative_mount_path='job_afs',
                            credentials=models.AzureStorageCredentialsInfo(
                                account_key=storage_account_key
                            ),
                        )
                    ],
                    azure_blob_file_systems=[
                        models.AzureBlobFileSystemReference(
                            account_name=storage_account.name,
                            container_name='jobcontainer',
                            relative_mount_path='job_bfs',
                            credentials=models.AzureStorageCredentialsInfo(
                                account_key=storage_account_key
                            ),
                        )
                    ]
                ),
                # Put standard output on cluster level AFS to check that the job has access to it.
                std_out_err_path_prefix='$AZ_BATCHAI_MOUNT_ROOT/{0}'.format(helpers.AZURE_FILES_MOUNTING_PATH),
                # Create two output directories on job level AFS and blobfuse.
                output_directories=[
                    models.OutputDirectory(id='OUTPUT1', path_prefix='$AZ_BATCHAI_JOB_MOUNT_ROOT/job_afs'),
                    models.OutputDirectory(id='OUTPUT2', path_prefix='$AZ_BATCHAI_JOB_MOUNT_ROOT/job_bfs')
                ],
                # Check that the job preparation has access to job level file systems.
                job_preparation=models.JobPreparation(
                    command_line='echo afs > $AZ_BATCHAI_OUTPUT_OUTPUT1/prep_afs.txt; '
                                 'echo bfs > $AZ_BATCHAI_OUTPUT_OUTPUT2/prep_bfs.txt; '
                                 'echo done'
                ),
                # Check that the job has access to job
                custom_toolkit_settings=models.CustomToolkitSettings(
                    command_line='echo afs > $AZ_BATCHAI_OUTPUT_OUTPUT1/job_afs.txt; '
                                 'echo bfs > $AZ_BATCHAI_OUTPUT_OUTPUT2/job_bfs.txt; '
                                 'mkdir $AZ_BATCHAI_OUTPUT_OUTPUT1/afs; '
                                 'echo afs > $AZ_BATCHAI_OUTPUT_OUTPUT1/afs/job_afs.txt; '
                                 'mkdir $AZ_BATCHAI_OUTPUT_OUTPUT2/bfs; '
                                 'echo bfs > $AZ_BATCHAI_OUTPUT_OUTPUT2/bfs/job_bfs.txt; '
                                 'echo done'
                )
            )
        ).result()
        self.assertEqual(
            helpers.wait_for_job_completion(self.is_live, self.client, resource_group.name, job.name,
                                            helpers.MINUTE),
            models.ExecutionState.succeeded)

        job = self.client.jobs.get(resource_group.name, job.name)
        # Assert job and job prep standard output is populated on cluster level filesystem
        helpers.assert_job_files_are(self, self.client, resource_group.name, job.name,
                                     helpers.STANDARD_OUTPUT_DIRECTORY_ID,
                                     {u'stdout.txt': u'done\n', u'stderr.txt': u'',
                                      u'stdout-job_prep.txt': u'done\n', u'stderr-job_prep.txt': u''})
        # Assert files are generated on job level AFS
        helpers.assert_job_files_are(self, self.client, resource_group.name, job.name, 'OUTPUT1',
                                     {u'job_afs.txt': u'afs\n', u'prep_afs.txt': u'afs\n', u'afs': None})
        # Assert files are generated on job level blobfuse
        helpers.assert_job_files_are(self, self.client, resource_group.name, job.name, 'OUTPUT2',
                                     {u'job_bfs.txt': u'bfs\n', u'prep_bfs.txt': u'bfs\n', u'bfs': None})
        # Assert subfolders are available via API
        helpers.assert_job_files_in_path_are(self, self.client, resource_group.name, job.name, 'OUTPUT1',
                                             'afs', {u'job_afs.txt': u'afs\n'})
        helpers.assert_job_files_in_path_are(self, self.client, resource_group.name, job.name, 'OUTPUT2',
                                             'bfs', {u'job_bfs.txt': u'bfs\n'})

        # Assert that we can access the output files created on job level mount volumes directly in storage using path
        # segment returned by the server.
        if storage_account.name != helpers.FAKE_STORAGE.name:
            files = FileService(storage_account.name, storage_account_key)
            self.assertTrue(
                files.exists('jobshare', job.job_output_directory_path_segment +
                             '/' + helpers.OUTPUT_DIRECTORIES_FOLDER_NAME, 'job_afs.txt'))
            blobs = BlockBlobService(storage_account.name, storage_account_key)
            self.assertTrue(
                blobs.exists('jobcontainer', job.job_output_directory_path_segment +
                             '/' + helpers.OUTPUT_DIRECTORIES_FOLDER_NAME + '/job_bfs.txt'))
        # After the job is done the filesystems should be unmounted automatically, check this by submitting a new job.
        checker = self.client.jobs.create(
            resource_group.name,
            'checker',
            parameters=models.JobCreateParameters(
                location=location,
                cluster=models.ResourceId(id=cluster.id),
                node_count=1,
                std_out_err_path_prefix='$AZ_BATCHAI_MOUNT_ROOT/{0}'.format(helpers.AZURE_FILES_MOUNTING_PATH),
                custom_toolkit_settings=models.CustomToolkitSettings(
                    command_line='echo job; df | grep -E "job_bfs|job_afs"'
                )
            )
        ).result()
        # Check the job failed because there are not job level mount volumes anymore
        self.assertEqual(
            helpers.wait_for_job_completion(self.is_live, self.client, resource_group.name, checker.name,
                                            helpers.MINUTE),
            models.ExecutionState.failed)
        # Check that the cluster level AFS was still mounted
        helpers.assert_job_files_are(self, self.client, resource_group.name, checker.name,
                                     helpers.STANDARD_OUTPUT_DIRECTORY_ID,
                                     {u'stdout.txt': u'job\n', u'stderr.txt': u''})
Example #46
#!/usr/bin/env python

from azure.storage.file import FileService
file_service = FileService(account_name, account_key1)
#file_service.create_share('myshare')

#file_service.create_directory(
#    'myshare',
#    'uploads',
#)

#from azure.storage.file import FileService

#file_service.put_file_from_path(
#    'myshare',
#        'uploads',
#    'azurefile.txt',
#    'localfile.txt',
#    max_connections=5,
#)


with open('localfile.txt') as localfile:
    file_service.put_file_from_stream(
        'myshare',
        'uploads',
        'remote.txt',
        localfile,
        count=50000,
        max_connections=4,
    )
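put_file_from_stream belongs to an older SDK surface; with the azure-storage-file FileService used in most examples on this page, the closest equivalent is create_file_from_stream, which needs the byte count up front. A hedged sketch:

# Sketch with the newer FileService API (assumes file_service is key-authenticated).
import os

with open('localfile.txt', 'rb') as localfile:
    file_service.create_file_from_stream(
        'myshare',
        'uploads',
        'remote.txt',
        localfile,
        count=os.path.getsize('localfile.txt'),
        max_connections=4,
    )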
Example #47
class AzureFileWriter(FilebaseBaseWriter):
    """
    Writes items to Azure file shares. It is a file-based writer, so the filebase
    option is available.

        - account_name (str)
            Public access name of the Azure account.

        - account_key (str)
            Public access key of the Azure account.

        - share (str)
            File share name.

        - filebase (str)
            Base path to store the items in the share.

    """

    supported_options = {
        "account_name": {"type": six.string_types, "env_fallback": "EXPORTERS_AZUREWRITER_NAME"},
        "account_key": {"type": six.string_types, "env_fallback": "EXPORTERS_AZUREWRITER_KEY"},
        "share": {"type": six.string_types},
    }

    def __init__(self, options, meta, *args, **kw):
        from azure.storage.file import FileService

        super(AzureFileWriter, self).__init__(options, meta, *args, **kw)
        account_name = self.read_option("account_name")
        account_key = self.read_option("account_key")
        self.azure_service = FileService(account_name, account_key)
        self.share = self.read_option("share")
        self.azure_service.create_share(self.share)
        self.logger.info("AzureWriter has been initiated." "Writing to share {}".format(self.share))
        self.set_metadata("files_counter", Counter())
        self.set_metadata("files_written", [])

    def write(self, dump_path, group_key=None, file_name=None):
        if group_key is None:
            group_key = []
        self._write_file(dump_path, group_key, file_name)

    def _update_metadata(self, dump_path, filebase_path, file_name):
        buffer_info = self.write_buffer.metadata[dump_path]
        file_info = {
            "file_name": file_name,
            "filebase_path": filebase_path,
            "size": buffer_info["size"],
            "number_of_records": buffer_info["number_of_records"],
        }
        files_written = self.get_metadata("files_written")
        files_written.append(file_info)
        self.set_metadata("files_written", files_written)
        self.get_metadata("files_counter")[filebase_path] += 1

    def _ensure_path(self, filebase):
        path = filebase.split("/")
        folders_added = []
        for sub_path in path:
            folders_added.append(sub_path)
            parent = "/".join(folders_added)
            self.azure_service.create_directory(self.share, parent)

    @retry_long
    def _write_file(self, dump_path, group_key, file_name=None):
        filebase_path, file_name = self.create_filebase_name(group_key, file_name=file_name)
        self._ensure_path(filebase_path)
        self.azure_service.create_file_from_path(self.share, filebase_path, file_name, dump_path, max_connections=5)
        self._update_metadata(dump_path, filebase_path, file_name)

    def get_file_suffix(self, path, prefix):
        number_of_keys = self.get_metadata("files_counter").get(path, 0)
        suffix = "{}".format(str(number_of_keys))
        return suffix

    def _check_write_consistency(self):
        from azure.common import AzureMissingResourceHttpError

        for file_info in self.get_metadata("files_written"):
            try:
                afile = self.azure_service.get_file_properties(
                    self.share, file_info["filebase_path"], file_info["file_name"]
                )
                file_size = afile.properties.content_length
                if str(file_size) != str(file_info["size"]):
                    raise InconsistentWriteState(
                        "File {} has unexpected size. (expected {} - got {})".format(
                            file_info["file_name"], file_info["size"], file_size
                        )
                    )
            except AzureMissingResourceHttpError:
                raise InconsistentWriteState("Missing file {}".format(file_info["file_name"]))
        self.logger.info("Consistency check passed")