Example #1
    def _create_instance(self):
        """
        Initialize and create instance.
        """
        futures = []
        with _ThreadPoolExecutor(max_workers=6) as executor:
            # Run configuration in parallel
            policy = executor.submit(self._init_policy)
            role = executor.submit(self._init_role)
            instance_profile = executor.submit(self._init_instance_profile)
            for method in (self._init_key_pair, self._init_security_group,
                           self._init_block_device_mappings):
                futures.append(executor.submit(method))

            # Wait until role, instance_profile and policy are completed
            # before attaching them
            for future in _as_completed((policy, instance_profile)):
                role.result()
                futures.append(executor.submit(
                    self._attach_role_policy if future.result() == 'policy'
                    else self._attach_instance_profile_role))

        # Wait for completion
        for future in _as_completed(futures):
            future.result()
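
The `_ThreadPoolExecutor` and `_as_completed` names used throughout these examples are presumably private aliases of `concurrent.futures.ThreadPoolExecutor` and `concurrent.futures.as_completed`. A minimal, self-contained sketch of the submit/as_completed pattern under that assumption (the `_task` function is hypothetical):

from concurrent.futures import ThreadPoolExecutor as _ThreadPoolExecutor
from concurrent.futures import as_completed as _as_completed


def _task(value):
    # Hypothetical stand-in for an initialization step such as _init_role.
    return value * 2


futures = []
with _ThreadPoolExecutor(max_workers=4) as executor:
    for value in range(4):
        futures.append(executor.submit(_task, value))

# Iterating as_completed yields each future as it finishes; calling
# result() re-raises any exception from the worker thread.
for future in _as_completed(futures):
    print(future.result())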
Example #2
    def _flush(self, buffer, start, end):
        """
        Flush the write buffer of the stream if applicable.

        Args:
            buffer (memoryview): Buffer content.
            start (int): Start of buffer position to flush.
                Supported only with page blobs.
            end (int): End of buffer position to flush.
                Supported only with page blobs.
        """
        buffer_size = len(buffer)
        if not buffer_size:
            return

        # Write range normally
        with self._size_lock:
            if end > self._size:
                # Resize the blob if there is not enough space
                with _handle_azure_exception():
                    self._resize(content_length=end, **self._client_kwargs)
                self._reset_head()

        if buffer_size > self.MAX_FLUSH_SIZE:
            # Buffer too large, needs to be split into multiple requests
            futures = []
            for part_start in range(0, buffer_size, self.MAX_FLUSH_SIZE):

                # Split buffer
                buffer_part = buffer[part_start:part_start +
                                     self.MAX_FLUSH_SIZE]
                if not len(buffer_part):
                    # No more data
                    break

                # Upload split buffer in parallel
                start_range = start + part_start
                futures.append(
                    self._workers.submit(self._update_range,
                                         data=buffer_part.tobytes(),
                                         start_range=start_range,
                                         end_range=start_range +
                                         len(buffer_part) - 1,
                                         **self._client_kwargs))

            with _handle_azure_exception():
                # Wait for upload completion
                for future in _as_completed(futures):
                    future.result()

        else:
            # Buffer smaller than the limit, do a single request.
            with _handle_azure_exception():
                self._update_range(data=buffer.tobytes(),
                                   start_range=start,
                                   end_range=end - 1,
                                   **self._client_kwargs)
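
A minimal sketch of the chunked flush above in isolation, assuming `MAX_FLUSH_SIZE` is a byte count and that `start_range`/`end_range` are inclusive byte offsets (hence the `- 1`); `fake_update_range` is a hypothetical stand-in for the Azure range update call:

MAX_FLUSH_SIZE = 4  # Hypothetical small limit to keep the output readable.


def fake_update_range(data, start_range, end_range):
    # Hypothetical stand-in for the real page blob range update.
    print("upload bytes %d-%d: %r" % (start_range, end_range, data))


buffer = memoryview(b"abcdefghij")
start = 100  # Absolute position of the buffer within the blob.

for part_start in range(0, len(buffer), MAX_FLUSH_SIZE):
    # Slicing a memoryview does not copy; tobytes() copies only the chunk.
    buffer_part = buffer[part_start:part_start + MAX_FLUSH_SIZE]
    start_range = start + part_start
    fake_update_range(data=buffer_part.tobytes(),
                      start_range=start_range,
                      end_range=start_range + len(buffer_part) - 1)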
Example #3
    def _create_instance(self):
        """
        Initialize and create instance.
        """
        futures = []
        with _ThreadPoolExecutor(max_workers=5) as executor:
            # Run configuration in parallel
            policy = executor.submit(self._init_policy)
            role = executor.submit(self._init_role)
            for method in (self._init_key_pair, self._init_security_group):
                futures.append(executor.submit(method))

            # Wait until role and policy are completed before attaching them
            for future in _as_completed((policy, role)):
                future.result()
            futures.append(executor.submit(self._attach_role_policy))

        # Wait for completion
        for future in _as_completed(futures):
            future.result()
Example #4
    def _flush(self, buffer, start, end):
        """
        Flush the write buffer of the stream if applicable.

        Args:
            buffer (memoryview): Buffer content.
            start (int): Start of buffer position to flush.
                Supported only with page blobs.
            end (int): End of buffer position to flush.
                Supported only with page blobs.
        """
        buffer_size = len(buffer)
        if not buffer_size:
            return

        with self._size_lock:
            if end > self._size:
                with _handle_azure_exception():
                    self._resize(content_length=end, **self._client_kwargs)
                self._reset_head()

        if buffer_size > self.MAX_FLUSH_SIZE:
            futures = []
            for part_start in range(0, buffer_size, self.MAX_FLUSH_SIZE):
                buffer_part = buffer[part_start:part_start +
                                     self.MAX_FLUSH_SIZE]
                if not len(buffer_part):
                    break

                start_range = start + part_start
                futures.append(
                    self._workers.submit(
                        self._update_range,
                        data=buffer_part.tobytes(),
                        start_range=start_range,
                        end_range=start_range + len(buffer_part) - 1,
                        **self._client_kwargs,
                    ))

            with _handle_azure_exception():
                for future in _as_completed(futures):
                    future.result()

        else:
            with _handle_azure_exception():
                self._update_range(
                    data=buffer.tobytes(),
                    start_range=start,
                    end_range=end - 1,
                    **self._client_kwargs,
                )
Example #5
    def _create_instance(self):
        """
        Initializes and creates instance.
        """
        # Run configuration in parallel
        futures = []
        with _ThreadPoolExecutor(
                max_workers=len(self._INIT_METHODS)) as executor:
            for method in self._INIT_METHODS:
                futures.append(executor.submit(getattr(self, method)))

        # Wait for completion
        for future in _as_completed(futures):
            future.result()
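
Example #5 drives the same pattern from a class-level tuple of method names resolved with `getattr`. A minimal sketch of that dispatch, with hypothetical init methods standing in for the real ones:

from concurrent.futures import ThreadPoolExecutor as _ThreadPoolExecutor
from concurrent.futures import as_completed as _as_completed


class _Host:
    # Hypothetical list of initialization steps, resolved by name at runtime.
    _INIT_METHODS = ('_init_key_pair', '_init_security_group')

    def _init_key_pair(self):
        return 'key_pair ready'

    def _init_security_group(self):
        return 'security_group ready'

    def _create_instance(self):
        futures = []
        with _ThreadPoolExecutor(
                max_workers=len(self._INIT_METHODS)) as executor:
            for method in self._INIT_METHODS:
                futures.append(executor.submit(getattr(self, method)))

        for future in _as_completed(futures):
            print(future.result())


_Host()._create_instance()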
Example #6
    def __init__(self):
        # Avoid double __exit__ / __del__ call
        self._activated = False

        # Handle SIGTERM like SIGINT (systemd stops services using SIGTERM)
        _signal.signal(_signal.SIGTERM, self._interrupt)

        # Get Systemd notify socket
        self._sd_notify_address = self._get_sd_notify_socket()

        # Get FPGA slots configuration
        self._fpga_slots = dict()

        for env_key in _environ:
            for env, key in (('ACCELIZE_DRM_DRIVER_', 'fpga_driver_name'),
                             ('ACCELIZE_DRM_CRED_', 'cred_file_path'),
                             ('ACCELIZE_DRM_CONF_', 'conf_file_path'),
                             ('ACCELIZE_DRM_IMAGE_', 'fpga_image'),
                             ('ACCELIZE_DRM_DISABLED_', 'drm_disabled')):

                if env_key.startswith(env):
                    slot = int(env_key.rsplit('_', maxsplit=1)[1])

                    try:
                        slot_dict = self._fpga_slots[slot]
                    except KeyError:
                        # Add slot to configuration
                        self._fpga_slots[slot] = slot_dict = dict()

                    slot_dict[key] = _environ[env_key]

        if not self._fpga_slots:
            # If no configuration was passed by environment, activate the
            # default slot
            self._fpga_slots[self.DEFAULT_FPGA_SLOT_ID] = dict()

        # Initialize DRM manager
        self._drivers = []
        self._drm_managers = []
        self._lisenced_slots = []

        futures = []
        with _ThreadPoolExecutor() as executor:
            for fpga_slot_id in self._fpga_slots:
                futures.append(executor.submit(
                    self._init_drm_manager, int(fpga_slot_id)))

        with self._handle_exception((RuntimeError, OSError, _DRMException)):
            for future in _as_completed(futures):
                future.result()
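
The environment scan in Example #6 maps variables such as `ACCELIZE_DRM_DRIVER_0` onto a per-slot configuration dict, keyed by the integer suffix of the variable name. A minimal sketch of that mapping, with a hypothetical environment in place of `os.environ`:

_environ = {  # Hypothetical environment, in place of os.environ.
    'ACCELIZE_DRM_DRIVER_0': 'aws_f1',
    'ACCELIZE_DRM_CRED_0': '/path/to/cred.json',
    'ACCELIZE_DRM_DRIVER_1': 'xilinx_xrt',
}

fpga_slots = dict()
for env_key in _environ:
    for env, key in (('ACCELIZE_DRM_DRIVER_', 'fpga_driver_name'),
                     ('ACCELIZE_DRM_CRED_', 'cred_file_path')):
        if env_key.startswith(env):
            # The slot index is the integer suffix after the last underscore.
            slot = int(env_key.rsplit('_', maxsplit=1)[1])
            fpga_slots.setdefault(slot, dict())[key] = _environ[env_key]

print(fpga_slots)
# {0: {'fpga_driver_name': 'aws_f1', 'cred_file_path': '/path/to/cred.json'},
#  1: {'fpga_driver_name': 'xilinx_xrt'}}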
Example #7
    def __exit__(self, exc_type, exc_val, exc_tb):
        if self._activated:
            self._activated = False
            self._sd_notify(b"STOPPING=1")

            # Deactivate DRM manager for all slots
            futures = []
            with _ThreadPoolExecutor() as executor:
                for drm_manager in self._drm_managers:
                    futures.append(executor.submit(drm_manager.deactivate))
            try:
                with self._handle_exception(_DRMException):
                    for future in _as_completed(futures):
                        future.result()

            finally:
                self._drivers.clear()
                self._drm_managers.clear()
Example #8
    def __enter__(self):
        self._activated = True

        # Activate DRM manager for all slots
        futures = []
        with _ThreadPoolExecutor() as executor:
            for drm_manager in self._drm_managers:
                futures.append(executor.submit(drm_manager.activate))

        with self._handle_exception(_DRMException):
            for future in _as_completed(futures):
                future.result()

        # Notify systemd
        self._sd_notify(b"READY=1\nSTATUS=Licensing FPGA slot(s) %s" %
                        ', '.join(self._lisenced_slots).encode())

        return self
Example #9
def _auto_mount():
    """mounts from configuration"""
    # Get configuration
    config = _cfg.Configuration()

    # Finds possible storage to mount
    to_mount = set()
    name = config['host']['host_type']
    if name:
        to_mount.add(name)
    for section in config:
        if section.startswith('host.') or section.startswith('storage.'):
            name = section.split('.', 1)[1]
            if name:
                to_mount.add(name)

    if to_mount:
        # Tries to mount storage
        if _py[0] == 2:
            # On Python 2: there seems to be a deadlock on import when
            # using ThreadPoolExecutor
            for storage_type in to_mount:
                try:
                    mount(storage_type=storage_type, config=config)
                except (ImportError, _exc.AcceleratorException):
                    continue
            return

        futures = []
        with _ThreadPoolExecutor(max_workers=len(to_mount)) as executor:
            for storage_type in to_mount:
                try:
                    storage = _Storage(storage_type=storage_type,
                                       config=config)
                except (ImportError, _exc.AcceleratorException):
                    continue
                futures.append(executor.submit(storage.mount))

            # Waits for completion
            for future in _as_completed(futures):
                future.result()
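
The candidate names in Example #9 are derived from configuration section names of the form 'host.<name>' or 'storage.<name>'. A minimal sketch of that extraction, with a hypothetical list of section names in place of iterating `_cfg.Configuration()`:

# Hypothetical section names, in place of iterating _cfg.Configuration().
config_sections = ('host', 'host.aws', 'storage.s3', 'storage.azure_blob')

to_mount = set()
for section in config_sections:
    if section.startswith('host.') or section.startswith('storage.'):
        # Keep only the part after the first dot.
        name = section.split('.', 1)[1]
        if name:
            to_mount.add(name)

print(sorted(to_mount))  # ['aws', 'azure_blob', 's3']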