Example #1
import datetime

# assumed alias for the legacy azure-storage-file SDK
import azure.storage.file as azurefile

# module-level constant; the docstring below documents a seven-day expiry
_DEFAULT_SAS_EXPIRY_DAYS = 7


def create_file_share_saskey(
        storage_settings, file_share, kind, create_share=False):
    # type: (StorageCredentialsSettings, str, str, bool) -> str
    """Create a SAS key for a file share with a seven-day expiry time
    :param StorageCredentialsSettings storage_settings: storage settings
    :param str file_share: file share
    :param str kind: ingress or egress
    :param bool create_share: create file share
    :rtype: str
    :return: SAS key
    """
    file_client = azurefile.FileService(
        account_name=storage_settings.account,
        account_key=storage_settings.account_key,
        endpoint_suffix=storage_settings.endpoint)
    if create_share:
        file_client.create_share(file_share, fail_on_exist=False)
    if kind == 'ingress':
        perm = azurefile.SharePermissions(read=True, list=True)
    elif kind == 'egress':
        perm = azurefile.SharePermissions(
            read=True, write=True, delete=True, list=True)
    else:
        raise ValueError('{} type of transfer not supported'.format(kind))
    return file_client.generate_share_shared_access_signature(
        file_share, perm,
        expiry=datetime.datetime.utcnow() +
        datetime.timedelta(days=_DEFAULT_SAS_EXPIRY_DAYS)
    )
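A minimal usage sketch, assuming storage_settings is a StorageCredentialsSettings instance carrying the account, account_key, and endpoint attributes the function reads; the share name is a placeholder.

# hypothetical: mint an ingress (read/list) SAS, creating the share if needed
sas = create_file_share_saskey(storage_settings, 'myshare', 'ingress',
                               create_share=True)
print(sas)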
Example #2
    def __init__(self, azure_storage_account_name, azure_file_share_name, sas_token):
        LoggingMixIn.log.addHandler(console_handler)

        logger.info("Initializing AzureFiles Fuse Driver Implementation:%s %s", azure_storage_account_name, azure_file_share_name)
        self._azure_storage_account_name = azure_storage_account_name
        self._azure_file_share_name = azure_file_share_name
        self._sas_token = sas_token.lstrip("?")
        self._files_service = file.FileService(
            self._azure_storage_account_name, sas_token=self._sas_token,
            request_session=Session())

        # whether a previous write operation failed
        self._prior_write_failure = False

        # queue of pending write operations
        self.writes = deque()

        # cache of directory listings, keyed by path
        self.dir_cache = {}

        # per-file cache entries
        self.file_cache = defaultdict(FileCache)
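A minimal mount sketch, assuming fusepy is installed and that this __init__ belongs to the AzureFiles class (constructed the same way in Example #3); the account, share, SAS token, and mount point are placeholders.

from fuse import FUSE  # fusepy

driver = AzureFiles('mystorageacct', 'myshare', '?sv=2017-04-17&sig=...')
FUSE(driver, '/mnt/azfiles', foreground=True, nothreads=True)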
Example #3
    def setUp(self):
        # TODO: Verify Settings provided
        env_name = os.environ.get("azfilesfuse_test_accountname", None)
        if env_name is not None:
            self.STORAGE_ACCOUNT_NAME = env_name
        if self.STORAGE_ACCOUNT_NAME is None:
            raise Exception(
                "STORAGE_ACCOUNT_NAME variable necessary for running tests not set."
            )

        env_share = os.environ.get("azfilesfuse_test_accountshare", None)
        if env_share is not None:
            self.STORAGE_ACCOUNT_SHARE = env_share
        if self.STORAGE_ACCOUNT_SHARE is None:
            raise Exception(
                "STORAGE_ACCOUNT_SHARE variable necessary for running tests not set."
            )

        env_sas_token = os.environ.get("azfilesfuse_test_accountsastoken",
                                       None)
        if env_sas_token is not None:
            self.STORAGE_ACCOUNT_SAS_TOKEN = env_sas_token
        if self.STORAGE_ACCOUNT_SAS_TOKEN is None:
            raise Exception(
                "STORAGE_ACCOUNT_SAS_TOKEN variable necessary for running tests not set."
            )

        # Use the Azure Files SDK to verify the share is empty before
        # starting our tests.
        self.azure_fs = file.FileService(
            self.STORAGE_ACCOUNT_NAME,
            sas_token=self.STORAGE_ACCOUNT_SAS_TOKEN.lstrip('?'))

        self.delete_files_and_directories_from_share()

        # Create the fuse driver under test. Any components that need mocking
        # must be mocked before this point, though some mocks are applied
        # per test instead.
        self.fuse_driver = azfilesfuse.AzureFiles(
            self.STORAGE_ACCOUNT_NAME, self.STORAGE_ACCOUNT_SHARE,
            self.STORAGE_ACCOUNT_SAS_TOKEN)
Example #4
def _get_storage_entities(task_factory, storage_settings):
    # type: (dict, settings.TaskFactoryStorageSettings) -> FileInfo
    """Generate file info for storage entities matching a task factory
    :param dict task_factory: task factory object
    :param settings.TaskFactoryStorageSettings storage_settings:
        storage settings
    :rtype: FileInfo
    :return: file info (generated per matching entity)
    """
    if not storage_settings.is_file_share:
        # create blob client
        blob_client = azureblob.BlockBlobService(
            account_name=storage_settings.storage_settings.account,
            account_key=storage_settings.storage_settings.account_key,
            endpoint_suffix=storage_settings.storage_settings.endpoint)
        # list blobs in container with include/exclude
        blobs = blob_client.list_blobs(
            container_name=storage_settings.container)
        for blob in blobs:
            if not _inclusion_check(blob.name, storage_settings.include,
                                    storage_settings.exclude):
                continue
            file_path_with_container = '{}/{}'.format(
                storage_settings.container, blob.name)
            file_name = blob.name.split('/')[-1]
            file_name_no_extension = file_name.split('.')[0]
            if task_factory['file']['task_filepath'] == 'file_path':
                task_filepath = blob.name
            elif (task_factory['file']['task_filepath'] ==
                  'file_path_with_container'):
                task_filepath = file_path_with_container
            elif task_factory['file']['task_filepath'] == 'file_name':
                task_filepath = file_name
            elif (task_factory['file']['task_filepath'] ==
                  'file_name_no_extension'):
                task_filepath = file_name_no_extension
            else:
                raise ValueError(
                    'invalid task_filepath specification: {}'.format(
                        task_factory['file']['task_filepath']))
            # create blob url
            url = 'https://{}.blob.{}/{}/{}'.format(
                storage_settings.storage_settings.account,
                storage_settings.storage_settings.endpoint,
                storage_settings.container, urlquote(blob.name))
            # create blob sas
            sas = blob_client.generate_blob_shared_access_signature(
                storage_settings.container,
                blob.name,
                permission=azureblob.BlobPermissions.READ,
                expiry=datetime.datetime.utcnow() +
                datetime.timedelta(days=_DEFAULT_SAS_EXPIRY_DAYS))
            yield FileInfo(
                is_blob=True,
                url=url,
                sas=sas,
                file_path=blob.name,
                file_path_with_container=file_path_with_container,
                file_name=file_name,
                file_name_no_extension=file_name_no_extension,
                task_filepath=task_filepath,
            )
    else:
        # create file share client
        file_client = azurefile.FileService(
            account_name=storage_settings.storage_settings.account,
            account_key=storage_settings.storage_settings.account_key,
            endpoint_suffix=storage_settings.storage_settings.endpoint)
        # list files in share with include/exclude
        # use 'file_path' as the loop variable to avoid shadowing the builtin
        for file_path in _list_all_files_in_fileshare(
                file_client, storage_settings.container):
            if not _inclusion_check(file_path, storage_settings.include,
                                    storage_settings.exclude):
                continue
            file_path_with_container = '{}/{}'.format(
                storage_settings.container, file_path)
            file_name = file_path.split('/')[-1]
            file_name_no_extension = file_name.split('.')[0]
            if task_factory['file']['task_filepath'] == 'file_path':
                task_filepath = file_path
            elif (task_factory['file']['task_filepath'] ==
                  'file_path_with_container'):
                task_filepath = file_path_with_container
            elif task_factory['file']['task_filepath'] == 'file_name':
                task_filepath = file_name
            elif (task_factory['file']['task_filepath'] ==
                  'file_name_no_extension'):
                task_filepath = file_name_no_extension
            else:
                raise ValueError(
                    'invalid task_filepath specification: {}'.format(
                        task_factory['file']['task_filepath']))
            yield FileInfo(
                is_blob=False,
                url=None,
                sas=None,
                file_path=file_path,
                file_path_with_container=file_path_with_container,
                file_name=file_name,
                file_name_no_extension=file_name_no_extension,
                task_filepath=task_filepath,
            )
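A hedged driving sketch; task_factory below is a hypothetical config of the shape the function reads, and storage_settings is assumed to be a settings.TaskFactoryStorageSettings instance obtained elsewhere.

task_factory = {'file': {'task_filepath': 'file_name'}}  # hypothetical
for fi in _get_storage_entities(task_factory, storage_settings):
    print(fi.task_filepath, fi.url if fi.is_blob else fi.file_path)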
Example #5
    def _initializePoolKeywordArgs(self, vm_size, executor_image_names=None):
        """Returns kwargs used for pool initialization."""
        # avoid the mutable-default-argument pitfall
        executor_image_names = executor_image_names or []
        admin_user = None
        if current_app.config[
                'BATCH_NODE_ADMIN_USERNAME'] and current_app.config[
                    'BATCH_NODE_ADMIN_PASSWORD']:
            # Add a debug admin user if requested
            current_app.logger.debug(
                f"Adding user {current_app.config['BATCH_NODE_ADMIN_USERNAME']} to pool"
            )
            admin_user = [
                azbatch.models.UserAccount(
                    elevation_level=azbatch.models.ElevationLevel.admin,
                    name=current_app.config['BATCH_NODE_ADMIN_USERNAME'],
                    password=current_app.config['BATCH_NODE_ADMIN_PASSWORD'])
            ]

        pool_start_task = None
        if current_app.config.get('BATCH_STORAGE_FILESHARE_NAME') is not None:
            current_app.logger.info(
                f"Creating pool with Azure files share '{current_app.config['BATCH_STORAGE_FILESHARE_NAME']}'"
            )
            share_name = current_app.config['BATCH_STORAGE_FILESHARE_NAME']
            file_service = azfiles.FileService(
                account_name=current_app.config['STORAGE_ACCOUNT_NAME'],
                account_key=current_app.config['STORAGE_ACCOUNT_KEY'])
            file_service.create_share(share_name)

            azfiles_mountpoint = "/mnt/batch/tasks/shared-azfiles"
            # FIXME: core.windows.net suffix could theoretically vary
            azfiles_endpoint = f"//{file_service.account_name}.file.core.windows.net/{share_name}"
            node_start_command = (
                f'/bin/bash -c "mkdir -p {shlex.quote(azfiles_mountpoint)} && '
                f'mount -t cifs {shlex.quote(azfiles_endpoint)} '
                f'{shlex.quote(azfiles_mountpoint)} -o vers=3.0,'
                f'username={shlex.quote(current_app.config["STORAGE_ACCOUNT_NAME"])},'
                f'password={current_app.config["STORAGE_ACCOUNT_KEY"]},'
                f'dir_mode=0777,file_mode=0777,serverino,mfsymlinks"')

            pool_start_task = azbatch.models.StartTask(
                command_line=node_start_command,
                wait_for_success=True,
                user_identity=azbatch.models.UserIdentity(
                    auto_user=azbatch.models.AutoUserSpecification(
                        scope=azbatch.models.AutoUserScope.pool,
                        elevation_level=azbatch.models.ElevationLevel.admin)))

        acr_registry = None
        if current_app.config['PRIVATE_DOCKER_REGISTRY_URL']:
            # Check if we need to add a private registry to the pool
            # Note images are only downloaded upon creation, never updated later
            current_app.logger.debug(
                f"Adding private Docker registry {current_app.config['PRIVATE_DOCKER_REGISTRY_URL']} to pool"
            )
            acr_registry = azbatch.models.ContainerRegistry(
                registry_server=current_app.config[
                    'PRIVATE_DOCKER_REGISTRY_URL'],
                user_name=current_app.config[
                    'PRIVATE_DOCKER_REGISTRY_USERNAME'],
                password=current_app.config[
                    'PRIVATE_DOCKER_REGISTRY_PASSWORD'])

        container_conf = azbatch.models.ContainerConfiguration(
            container_image_names=['alpine'] + executor_image_names,
            container_registries=[acr_registry] if acr_registry else None)

        image = azbatch.models.VirtualMachineConfiguration(
            image_reference=azbatch.models.ImageReference(
                publisher="microsoft-azure-batch",
                offer="ubuntu-server-container",
                sku="16-04-lts",
                version="latest"),
            container_configuration=container_conf,
            node_agent_sku_id="batch.node.ubuntu 16.04")

        return {
            'vm_size': vm_size,
            'target_dedicated_nodes':
                current_app.config['BATCH_POOL_DEDICATED_NODE_COUNT'],
            'target_low_priority_nodes':
                current_app.config['BATCH_POOL_LOW_PRIORITY_NODE_COUNT'],
            'user_accounts': admin_user,
            'start_task': pool_start_task,
            'virtual_machine_configuration': image,
        }
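A sketch of how the returned kwargs might feed pool creation, assuming this method's class also holds an azure-batch BatchServiceClient (the self.batch_client attribute here is hypothetical); the pool id, VM size, and image name are placeholders.

kwargs = self._initializePoolKeywordArgs(
    'STANDARD_D2_V3', executor_image_names=['myregistry/executor:latest'])
pool = azbatch.models.PoolAddParameter(id='my-pool', **kwargs)
self.batch_client.pool.add(pool)  # hypothetical client attribute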
Example #6
# imports assumed by this snippet
import configparser
import os

import azure.storage.blob as blob
import azure.storage.file as file
from azure.batch import BatchServiceClient
from azure.batch.batch_auth import SharedKeyCredentials

config_file = 'config.ini'
config = configparser.ConfigParser()
config.read(config_file)
config_azure = config['AZURE']

# Azure
batch_account_name = config_azure['batch_account_name']
batch_account_key = config_azure['batch_account_key']
batch_account_url = config_azure['batch_account_url']
storage_account_name = config_azure['storage_account_name']
storage_account_key = config_azure['storage_account_key']
batch_credential = SharedKeyCredentials(batch_account_name, batch_account_key)
batch_service = BatchServiceClient(batch_credential, batch_account_url)
block_blob_service = blob.BlockBlobService(storage_account_name,
                                           storage_account_key)
file_service = file.FileService(storage_account_name, storage_account_key)

source_container = config_azure['source_container']
input_container = config_azure['input_container']


def application_source_upload():
    '''
    Upload related source files to Azure Blob
    '''
    source_files = []
    unique_files = []

    for folder, _, files in os.walk('../'):
        # Skip helper folder
        if os.path.abspath(folder) == os.path.abspath('./'):
            continue
Example #7
import datetime

# assumed aliases for the legacy azure-storage SDK modules
import azure.storage.blob as azureblob
import azure.storage.file as azurefile

# assumed module constant; Example #1 documents a seven-day expiry
_DEFAULT_SAS_EXPIRY_DAYS = 7


def create_saskey(storage_settings, path, file, create, read, write, delete):
    # type: (settings.StorageCredentialsSettings, str, bool, bool, bool,
    #        bool, bool) -> str
    """Create an object-level SAS key
    :param settings.StorageCredentialsSettings storage_settings:
        storage settings
    :param str path: path
    :param bool file: file sas
    :param bool create: create perm
    :param bool read: read perm
    :param bool write: write perm
    :param bool delete: delete perm
    :rtype: str
    :return: sas token
    """
    if file:
        client = azurefile.FileService(
            account_name=storage_settings.account,
            account_key=storage_settings.account_key,
            endpoint_suffix=storage_settings.endpoint)
        perm = azurefile.FilePermissions(read=read,
                                         create=create,
                                         write=write,
                                         delete=delete)
        tmp = path.split('/')
        if len(tmp) < 2:
            raise ValueError('path is invalid: {}'.format(path))
        share_name = tmp[0]
        if len(tmp) == 2:
            directory_name = ''
            file_name = tmp[1]
        else:
            directory_name = tmp[1]
            file_name = '/'.join(tmp[2:])
        sas = client.generate_file_shared_access_signature(
            share_name=share_name,
            directory_name=directory_name,
            file_name=file_name,
            permission=perm,
            expiry=datetime.datetime.utcnow() +
            datetime.timedelta(days=_DEFAULT_SAS_EXPIRY_DAYS))
    else:
        client = azureblob.BlockBlobService(
            account_name=storage_settings.account,
            account_key=storage_settings.account_key,
            endpoint_suffix=storage_settings.endpoint)
        perm = azureblob.BlobPermissions(read=read,
                                         create=create,
                                         write=write,
                                         delete=delete)
        tmp = path.split('/')
        if len(tmp) < 1:
            raise ValueError('path is invalid: {}'.format(path))
        container_name = tmp[0]
        blob_name = '/'.join(tmp[1:])
        sas = client.generate_blob_shared_access_signature(
            container_name=container_name,
            blob_name=blob_name,
            permission=perm,
            expiry=datetime.datetime.utcnow() +
            datetime.timedelta(days=_DEFAULT_SAS_EXPIRY_DAYS))
    return sas
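A hedged usage sketch; storage_settings again stands in for a settings object carrying account, account_key, and endpoint attributes, and the path is a placeholder.

# hypothetical: a read-only SAS for a single blob under 'mycontainer'
sas = create_saskey(storage_settings, 'mycontainer/path/to/data.bin',
                    file=False, create=False, read=True, write=False,
                    delete=False)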
Example #8
	def __init__(self, access_name, access_key, access_container_list):
		self.__mpi_rank = MPI.COMM_WORLD.Get_rank()
		self.__mpi_size = MPI.COMM_WORLD.Get_size()
		self.__bench_target = 'Azure File'
		self.__storage_service = file.FileService(access_name, access_key)


def populate_file_share():
    """
        Populate File Share
        Generates a new file share if one doesn't exist and sets all the files
        into place.
    """
    file_share = file_service.FileService(account_name=STORAGE_ACCOUNT_NAME,
                                          account_key=STORAGE_ACCOUNT_KEY)

    # Creates a file share for input / output storage.
    if not file_share.create_share(share_name=FILE_SHARE_NAME, quota=1):
        LOGGER.info("File share already exists...")
    else:
        file_share.create_directory(share_name=FILE_SHARE_NAME,
                                    directory_name='logs')
        file_share.create_directory(share_name=FILE_SHARE_NAME,
                                    directory_name='last-update')
        # Create temp directory for file storage
        try:
            os.mkdir(TEMP_FILE_STORAGE)
        except FileExistsError as e:
            LOGGER.info(e)

        LOGGER.info("File share did not exist. Populating files")
        files = [
            f'{processable.name}.rds' for processable in filter(
                lambda x: isinstance(x, Dataset), SCHEDULE)
        ]

        # use 'fname' as the loop variable to avoid shadowing the builtin
        for fname in files:
            r = requests.get(ANCIL_FILES_URL + 'last-update/' + fname)
            with open(TEMP_FILE_STORAGE + fname, 'wb') as f:
                f.write(r.content)
            file_share.create_file_from_path(
                share_name=FILE_SHARE_NAME,
                directory_name='last-update',
                file_name=fname,
                local_file_path=TEMP_FILE_STORAGE + fname)

        r = requests.get(ANCIL_FILES_URL + 'runtimes.csv')
        with open(TEMP_FILE_STORAGE + 'runtimes.csv', 'wb') as f:
            f.write(r.content)
        file_share.create_file_from_path(share_name=FILE_SHARE_NAME,
                                         directory_name="",
                                         file_name='runtimes.csv',
                                         local_file_path=TEMP_FILE_STORAGE +
                                         'runtimes.csv')

        r = requests.get(ANCIL_FILES_URL + 'status.csv')
        with open(TEMP_FILE_STORAGE + 'status.csv', 'wb') as f:
            f.write(r.content)
        file_share.create_file_from_path(share_name=FILE_SHARE_NAME,
                                         directory_name="",
                                         file_name='status.csv',
                                         local_file_path=TEMP_FILE_STORAGE +
                                         'status.csv')

        file_share.create_file_from_path(share_name=FILE_SHARE_NAME,
                                         directory_name="",
                                         file_name='env_config.R',
                                         local_file_path='env_config.R')

        shutil.rmtree(TEMP_FILE_STORAGE)
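A minimal verification sketch, assuming the same module-level STORAGE_ACCOUNT_NAME, STORAGE_ACCOUNT_KEY, and FILE_SHARE_NAME constants; it lists the share root after population.

svc = file_service.FileService(account_name=STORAGE_ACCOUNT_NAME,
                               account_key=STORAGE_ACCOUNT_KEY)
for item in svc.list_directories_and_files(FILE_SHARE_NAME):
    print(item.name)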