Example #1
    def __init__(self, srequest, storlet_pipe_path, storlet_logger_path,
                 timeout, logger, extra_sources=None):
        self.srequest = srequest
        self.storlet_pipe_path = storlet_pipe_path
        self.storlet_logger_path = storlet_logger_path
        self.storlet_logger = StorletLogger(self.storlet_logger_path,
                                            'storlet_invoke')
        self.logger = logger
        self.timeout = timeout

        # local side file descriptors
        self.data_read_fd = None
        self.data_write_fd = None
        self.metadata_read_fd = None
        self.metadata_write_fd = None
        self.execution_str_read_fd = None
        self.execution_str_write_fd = None
        self.task_id = None
        self._input_data_read_fd = None
        self._input_data_write_fd = None

        self.extra_data_sources = []
        extra_sources = extra_sources or []
        for source in extra_sources:
            if source.has_fd:
                # TODO(kota_): it may be data_fd in the future.
                raise Exception(
                    'extra_sources requires data_iter, not data_fd')
            self.extra_data_sources.append(
                {'read_fd': None, 'write_fd': None,
                 'user_metadata': source.user_metadata,
                 'data_iter': source.data_iter})

        if not os.path.exists(storlet_logger_path):
            os.makedirs(storlet_logger_path)
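
The constructor above requires every extra source to provide a data_iter rather than an open file descriptor. Below is a minimal construction sketch; FakeStorletRequest and all of its attribute values are hypothetical stand-ins that only mirror the attributes the constructor reads (has_fd, user_metadata, data_iter):

import logging


class FakeStorletRequest(object):
    # Hypothetical stub exposing the attributes the protocol reads
    def __init__(self, data_iter, user_metadata=None):
        self.has_fd = False            # body is supplied as an iterator
        self.has_range = False
        self.data_iter = data_iter
        self.user_metadata = user_metadata or {}
        self.params = {}


main = FakeStorletRequest(iter([b'main body chunk']))
extra = FakeStorletRequest(iter([b'extra chunk']),
                           {'X-Object-Meta-Key': 'value'})

protocol = StorletInvocationProtocol(
    main, '/tmp/storlets/pipe', '/tmp/storlets/logs',
    timeout=10, logger=logging.getLogger(__name__),
    extra_sources=[extra])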
Example #2
class StorletInvocationProtocol(object):
    """
    StorletInvocationProtocol class

    This class handles communication with a Docker container to run an
    application

    :param srequest: StorletRequest instance
    :param storlet_pipe_path: path string to pipe
    :param storlet_logger_path: path string to log file
    :param timeout: timeout in seconds to wait for a response from the
                    container
    :param logger: logger instance
    :param extra_sources (WIP): a list of StorletRequest instances
                                which provide data_iter, used to feed
                                extra sources as data streams
    """
    def __init__(self, srequest, storlet_pipe_path, storlet_logger_path,
                 timeout, logger, extra_sources=None):
        self.srequest = srequest
        self.storlet_pipe_path = storlet_pipe_path
        self.storlet_logger_path = storlet_logger_path
        self.storlet_logger = StorletLogger(self.storlet_logger_path,
                                            'storlet_invoke')
        self.logger = logger
        self.timeout = timeout

        # local side file descriptors
        self.data_read_fd = None
        self.data_write_fd = None
        self.metadata_read_fd = None
        self.metadata_write_fd = None
        self.execution_str_read_fd = None
        self.execution_str_write_fd = None
        self.task_id = None
        self._input_data_read_fd = None
        self._input_data_write_fd = None

        self.extra_data_sources = []
        extra_sources = extra_sources or []
        for source in extra_sources:
            if source.has_fd:
                # TODO(kota_): it may be data_fd in the future.
                raise Exception(
                    'extra_sources requires data_iter, not data_fd')
            self.extra_data_sources.append(
                {'read_fd': None, 'write_fd': None,
                 'user_metadata': source.user_metadata,
                 'data_iter': source.data_iter})

        if not os.path.exists(storlet_logger_path):
            os.makedirs(storlet_logger_path)

    @property
    def input_data_read_fd(self):
        """
        File descriptor to read the input body content
        """
        if self.srequest.has_fd:
            return self.srequest.data_fd
        else:
            return self._input_data_read_fd

    @property
    def remote_fds(self):
        """
        File descriptors to be passed to container side
        """
        remote_fds = [self.input_data_read_fd,
                      self.execution_str_write_fd,
                      self.data_write_fd,
                      self.metadata_write_fd,
                      self.storlet_logger.getfd()]

        for source in self.extra_data_sources:
            remote_fds.append(source['read_fd'])
        return remote_fds

    @property
    def remote_fds_metadata(self):
        """
        Metadata about file descriptors to be passed to container side
        """
        input_fd_metadata = FDMetadata(
            sbus_fd.SBUS_FD_INPUT_OBJECT,
            storage_metadata=self.srequest.user_metadata)
        if self.srequest.has_range:
            input_fd_metadata.storlets_metadata['start'] = \
                str(self.srequest.start)
            input_fd_metadata.storlets_metadata['end'] = \
                str(self.srequest.end)
        fds_metadata = [
            input_fd_metadata.to_dict(),
            FDMetadata(sbus_fd.SBUS_FD_OUTPUT_TASK_ID).to_dict(),
            FDMetadata(sbus_fd.SBUS_FD_OUTPUT_OBJECT).to_dict(),
            FDMetadata(sbus_fd.SBUS_FD_OUTPUT_OBJECT_METADATA).to_dict(),
            FDMetadata(sbus_fd.SBUS_FD_LOGGER).to_dict()]

        for source in self.extra_data_sources:
            fdmd = FDMetadata(
                sbus_fd.SBUS_FD_INPUT_OBJECT,
                storage_metadata=source['user_metadata'])
            fds_metadata.append(fdmd.to_dict())
        return fds_metadata

    @contextmanager
    def _activate_invocation_descriptors(self):
        """
        Context manager for the file descriptors used in storlet invocation

        NOTE: This context manager only closes the remote side fds,
              so the caller must close the local side fds
        """
        self._prepare_invocation_descriptors()
        try:
            yield
        finally:
            self._close_remote_side_descriptors()

    def _prepare_invocation_descriptors(self):
        """
        Create all pipes used for Storlet execution
        """
        if not self.srequest.has_fd:
            self._input_data_read_fd, self._input_data_write_fd = os.pipe()
        self.data_read_fd, self.data_write_fd = os.pipe()
        self.execution_str_read_fd, self.execution_str_write_fd = os.pipe()
        self.metadata_read_fd, self.metadata_write_fd = os.pipe()

        for source in self.extra_data_sources:
            source['read_fd'], source['write_fd'] = os.pipe()

    def _safe_close(self, fds):
        """
        Make sure that all of the file descriptors get closed

        :param fds: a list of file descriptors
        """
        for fd in fds:
            try:
                os.close(fd)
            except OSError as err:
                if err.errno != errno.EBADF:
                    raise
                # TODO(kota_): fd might be closed already, so if already
                # closed, OSError will be raised. we need more refactoring
                # to keep the file descriptors clean.
                pass

    def _close_remote_side_descriptors(self):
        """
        Close all of the container side descriptors
        """
        fds = [self.data_write_fd, self.metadata_write_fd,
               self.execution_str_write_fd]
        if not self.srequest.has_fd:
            fds.append(self.input_data_read_fd)
        fds.extend([source['read_fd'] for source in self.extra_data_sources])
        for fd in fds:
            os.close(fd)

    def _close_local_side_descriptors(self):
        """
        Close all of the host side descriptors
        """
        fds = [self.data_read_fd, self.metadata_read_fd,
               self.execution_str_read_fd]
        fds.extend([source['write_fd'] for source in self.extra_data_sources])
        self._safe_close(fds)

    def _cancel(self):
        """
        Cancel on-going storlet execution
        """
        with _open_pipe() as (read_fd, write_fd):
            dtg = SBusServiceDatagram(
                sbus_cmd.SBUS_CMD_CANCEL,
                [write_fd],
                [FDMetadata(sbus_fd.SBUS_FD_SERVICE_OUT).to_dict()],
                None,
                self.task_id)
            rc = SBus.send(self.storlet_pipe_path, dtg)
            if (rc < 0):
                raise StorletRuntimeException('Failed to cancel task')
            # TODO(takashi): Check the response here
            os.read(read_fd, 10)

    def _invoke(self):
        """
        Send an execution command to the remote daemon factory
        """
        with self.storlet_logger.activate(),\
                self._activate_invocation_descriptors():
            self._send_execute_command()
        self._wait_for_read_with_timeout(self.execution_str_read_fd)
        # TODO(kota_): need an assertion for task_id format
        self.task_id = os.read(self.execution_str_read_fd, 10)
        os.close(self.execution_str_read_fd)

    def _send_execute_command(self):
        """
        Send execute command to the remote daemon factory to invoke storlet
        execution
        """
        dtg = SBusExecuteDatagram(
            sbus_cmd.SBUS_CMD_EXECUTE,
            self.remote_fds,
            self.remote_fds_metadata,
            self.srequest.params)
        rc = SBus.send(self.storlet_pipe_path, dtg)

        if (rc < 0):
            raise StorletRuntimeException("Failed to send execute command")

    def _wait_for_read_with_timeout(self, fd):
        """
        Wait until the read file descriptor becomes ready

        :param fd: File descriptor to read
        :raises StorletTimeout: raised when the fd does not become
                                readable within the timeout
        :raises StorletRuntimeException: raised when the fd is still not
                                         ready for reading after the wait
        """
        try:
            with StorletTimeout(self.timeout):
                r, w, e = select.select([fd], [], [])
        except StorletTimeout:
            exc_type, exc_value, exc_traceback = sys.exc_info()

            # When there is a task already running, we should cancel it.
            if self.task_id:
                try:
                    self._cancel()
                except StorletRuntimeException:
                    self.logger.warning(
                        'Task %s timed out, but failed to get canceled'
                        % self.task_id)
                    pass

            six.reraise(exc_type, exc_value, exc_traceback)
        if fd not in r:
            raise StorletRuntimeException('Read fd is not ready')

    def _read_metadata(self):
        """
        Read metadata in the storlet execution result from fd

        :returns: a dict of metadata
        """
        self._wait_for_read_with_timeout(self.metadata_read_fd)
        flat_json = os.read(self.metadata_read_fd, MAX_METADATA_SIZE)
        os.close(self.metadata_read_fd)
        try:
            return json.loads(flat_json)
        except ValueError:
            self.logger.exception('Failed to load metadata from json')
            raise StorletRuntimeException('Got invalid metadata format')

    def _wait_for_write_with_timeout(self, fd):
        """
        Wait until the write file descriptor becomes ready

        :param fd: File descriptor to write
        :raises StorletTimeout: raised when the fd does not become
                                writable within the timeout
        :raises StorletRuntimeException: raised when the fd is still not
                                         ready for writing after the wait
        """
        with StorletTimeout(self.timeout):
            r, w, e = select.select([], [fd], [])
        if fd not in w:
            raise StorletRuntimeException('Write fd is not ready')

    def _close_input_data_descriptors(self):
        fds = [self._input_data_read_fd, self._input_data_write_fd]
        self._safe_close(fds)

    def communicate(self):
        try:
            self._invoke()

            if not self.srequest.has_fd:
                self._wait_for_write_with_timeout(self._input_data_write_fd)

                # We do the writing in a different thread.
                # Otherwise, we can run into the following deadlock
                # 1. middleware writes to Storlet
                # 2. Storlet reads and starts to write metadata and then data
                # 3. middleware continues writing
                # 4. Storlet continues writing and gets stuck as middleware
                #    is busy writing, but still not consuming the reader end
                #    of the Storlet writer.
                eventlet.spawn_n(self._write_input_data,
                                 self._input_data_write_fd,
                                 self.srequest.data_iter)

            for source in self.extra_data_sources:
                # NOTE(kota_): not sure right now if using eventlet.spawn_n
                #              is the right way. Would GreenPool be better?
                #              I don't fully understand the deadlock
                #              described above.
                self._wait_for_write_with_timeout(source['write_fd'])
                eventlet.spawn_n(self._write_input_data,
                                 source['write_fd'],
                                 source['data_iter'])

            out_md = self._read_metadata()
            self._wait_for_read_with_timeout(self.data_read_fd)

            return StorletResponse(out_md, data_fd=self.data_read_fd,
                                   cancel=self._cancel)
        except Exception:
            self._close_local_side_descriptors()
            if not self.srequest.has_fd:
                self._close_input_data_descriptors()
            raise

    @contextmanager
    def _open_writer(self, fd):
        with os.fdopen(fd, 'w') as writer:
            yield writer

    def _write_input_data(self, fd, data_iter):
        try:
            # double try/except block to guard against unexpected errors
            try:
                with self._open_writer(fd) as writer:
                    for chunk in data_iter:
                        with StorletTimeout(self.timeout):
                            writer.write(chunk)
            except (OSError, TypeError, ValueError):
                self.logger.exception('fdopen failed')
            except IOError:
                # this can happen on a broken pipe during writer.write
                self.logger.exception('IOError with writing fd %s' % fd)
            except StorletTimeout:
                self.logger.exception(
                    'Timeout (%ss) while writing fd %s' % (self.timeout, fd))
        except Exception:
            # _write_input_data is designed to run in an eventlet green
            # thread, so we should catch and suppress any error here
            self.logger.exception('Unexpected error at writing input data')
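
The comment in communicate() above explains why the input body is written from a separate green thread: a pipe has a bounded kernel buffer, so a writer that never yields can block once the buffer fills while the peer is itself blocked writing back. Here is a self-contained sketch of the same producer/consumer pattern over a bare os.pipe(); it uses a plain thread instead of eventlet.spawn_n only to stay dependency-free, and all names are illustrative:

import os
from threading import Thread


def producer(fd, chunks):
    # Write from a separate thread so the reader below can drain the
    # pipe concurrently. Writing everything before reading could fill
    # the pipe buffer (64 KiB by default on Linux) and block forever,
    # which is the deadlock described in communicate().
    with os.fdopen(fd, 'wb') as writer:
        for chunk in chunks:
            writer.write(chunk)


read_fd, write_fd = os.pipe()
Thread(target=producer, args=(write_fd, [b'x' * 65536] * 4)).start()

received = b''
with os.fdopen(read_fd, 'rb') as reader:
    while True:
        data = reader.read(4096)
        if not data:
            break
        received += data

print(len(received))  # 262144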
Example #3
class StorletInvocationProtocol(object):
    """
    StorletInvocationProtocol class

    This class handles communication with a Docker container to run an
    application

    :param srequest: StorletRequest instance
    :param storlet_pipe_path: path string to pipe
    :param storlet_logger_path: path string to log file
    :param timeout: timeout in seconds to wait for a response from the
                    container
    :param logger: logger instance
    :param extra_sources (WIP): a list of StorletRequest instances
                                which provide data_iter, used to feed
                                extra sources as data streams
    """
    def __init__(self, srequest, storlet_pipe_path, storlet_logger_path,
                 timeout, logger, extra_sources=None):
        self.srequest = srequest
        self.storlet_pipe_path = storlet_pipe_path
        self.storlet_logger_path = storlet_logger_path
        self.storlet_logger = StorletLogger(self.storlet_logger_path,
                                            'storlet_invoke')
        self.logger = logger
        self.timeout = timeout

        # local side file descriptors
        self.data_read_fd = None
        self.data_write_fd = None
        self.metadata_read_fd = None
        self.metadata_write_fd = None
        self.execution_str_read_fd = None
        self.execution_str_write_fd = None
        self.task_id = None
        self._input_data_read_fd = None
        self._input_data_write_fd = None

        self.extra_data_sources = []
        extra_sources = extra_sources or []
        for source in extra_sources:
            if source.has_fd:
                # TODO(kota_): it may be data_fd in the future.
                raise Exception(
                    'extra_sources requires data_iter, not data_fd')
            self.extra_data_sources.append(
                {'read_fd': None, 'write_fd': None,
                 'user_metadata': source.user_metadata,
                 'data_iter': source.data_iter})

        if not os.path.exists(storlet_logger_path):
            os.makedirs(storlet_logger_path)

    @property
    def input_data_read_fd(self):
        """
        File descriptor to read the input body content
        """
        if self.srequest.has_fd:
            return self.srequest.data_fd
        else:
            return self._input_data_read_fd

    @property
    def remote_fds(self):
        """
        File descriptors to be passed to container side
        """
        remote_fds = [self.input_data_read_fd,
                      self.execution_str_write_fd,
                      self.data_write_fd,
                      self.metadata_write_fd,
                      self.storlet_logger.getfd()]

        for source in self.extra_data_sources:
            remote_fds.append(source['read_fd'])
        return remote_fds

    @property
    def remote_fds_metadata(self):
        """
        Metadata about file descriptors to be passed to container side
        """
        input_fd_metadata = FDMetadata(
            sbus_fd.SBUS_FD_INPUT_OBJECT,
            storage_metadata=self.srequest.user_metadata)
        if self.srequest.has_range:
            input_fd_metadata.storlets_metadata['start'] = \
                str(self.srequest.start)
            input_fd_metadata.storlets_metadata['end'] = \
                str(self.srequest.end)
        fds_metadata = [
            input_fd_metadata.to_dict(),
            FDMetadata(sbus_fd.SBUS_FD_OUTPUT_TASK_ID).to_dict(),
            FDMetadata(sbus_fd.SBUS_FD_OUTPUT_OBJECT).to_dict(),
            FDMetadata(sbus_fd.SBUS_FD_OUTPUT_OBJECT_METADATA).to_dict(),
            FDMetadata(sbus_fd.SBUS_FD_LOGGER).to_dict()]

        for source in self.extra_data_sources:
            fdmd = FDMetadata(
                sbus_fd.SBUS_FD_INPUT_OBJECT,
                storage_metadata=source['user_metadata'])
            fds_metadata.append(fdmd.to_dict())
        return fds_metadata

    @contextmanager
    def _activate_invocation_descriptors(self):
        """
        Context manager for the file descriptors used in storlet invocation

        NOTE: This context manager only closes the remote side fds,
              so the caller must close the local side fds
        """
        self._prepare_invocation_descriptors()
        try:
            yield
        finally:
            self._close_remote_side_descriptors()

    def _prepare_invocation_descriptors(self):
        """
        Create all pipes used for Storlet execution
        """
        if not self.srequest.has_fd:
            self._input_data_read_fd, self._input_data_write_fd = os.pipe()
        self.data_read_fd, self.data_write_fd = os.pipe()
        self.execution_str_read_fd, self.execution_str_write_fd = os.pipe()
        self.metadata_read_fd, self.metadata_write_fd = os.pipe()

        for source in self.extra_data_sources:
            source['read_fd'], source['write_fd'] = os.pipe()

    def _safe_close(self, fds):
        """
        Make sure that all of the file descriptors get closed

        :param fds: a list of file descriptors
        """
        for fd in fds:
            try:
                os.close(fd)
            except OSError as err:
                if err.errno != errno.EBADF:
                    raise
                # TODO(kota_): fd might be closed already, so if already
                # closed, OSError will be raised. we need more refactoring
                # to keep the file descriptors clean.
                pass

    def _close_remote_side_descriptors(self):
        """
        Close all of the container side descriptors
        """
        fds = [self.data_write_fd, self.metadata_write_fd,
               self.execution_str_write_fd]
        if not self.srequest.has_fd:
            fds.append(self.input_data_read_fd)
        fds.extend([source['read_fd'] for source in self.extra_data_sources])
        for fd in fds:
            os.close(fd)

    def _close_local_side_descriptors(self):
        """
        Close all of the host side descriptors
        """
        fds = [self.data_read_fd, self.metadata_read_fd,
               self.execution_str_read_fd]
        fds.extend([source['write_fd'] for source in self.extra_data_sources])
        self._safe_close(fds)

    def _cancel(self):
        """
        Cancel on-going storlet execution
        """
        with _open_pipe() as (read_fd, write_fd):
            dtg = SBusDatagram.create_service_datagram(
                sbus_cmd.SBUS_CMD_CANCEL,
                write_fd,
                None,
                self.task_id)
            rc = SBus.send(self.storlet_pipe_path, dtg)
            if (rc < 0):
                raise StorletRuntimeException('Failed to cancel task')
            # TODO(takashi): Check the response here
            os.read(read_fd, 10)

    def _invoke(self):
        """
        Send an execution command to the remote daemon factory
        """
        with self.storlet_logger.activate(),\
                self._activate_invocation_descriptors():
            self._send_execute_command()
        self._wait_for_read_with_timeout(self.execution_str_read_fd)
        # TODO(kota_): need an assertion for task_id format
        self.task_id = os.read(self.execution_str_read_fd, 10)
        os.close(self.execution_str_read_fd)

    def _send_execute_command(self):
        """
        Send execute command to the remote daemon factory to invoke storlet
        execution
        """
        dtg = SBusDatagram(
            sbus_cmd.SBUS_CMD_EXECUTE,
            self.remote_fds,
            self.remote_fds_metadata,
            self.srequest.params)
        rc = SBus.send(self.storlet_pipe_path, dtg)

        if (rc < 0):
            raise StorletRuntimeException("Failed to send execute command")

    def _wait_for_read_with_timeout(self, fd):
        """
        Wait until the read file descriptor becomes ready

        :param fd: File descriptor to read
        :raises StorletTimeout: raised when the fd does not become
                                readable within the timeout
        :raises StorletRuntimeException: raised when the fd is still not
                                         ready for reading after the wait
        """
        try:
            with StorletTimeout(self.timeout):
                r, w, e = select.select([fd], [], [])
        except StorletTimeout:
            exc_type, exc_value, exc_traceback = sys.exc_info()

            # When there is a task already running, we should cancel it.
            if self.task_id:
                try:
                    self._cancel()
                except StorletRuntimeException:
                    self.logger.warning(
                        'Task %s timed out, but failed to get canceled'
                        % self.task_id)
                    pass

            six.reraise(exc_type, exc_value, exc_traceback)
        if fd not in r:
            raise StorletRuntimeException('Read fd is not ready')

    def _read_metadata(self):
        """
        Read metadata in the storlet execution result from fd

        :returns: a dict of metadata
        """
        self._wait_for_read_with_timeout(self.metadata_read_fd)
        flat_json = os.read(self.metadata_read_fd, MAX_METADATA_SIZE)
        os.close(self.metadata_read_fd)
        try:
            return json.loads(flat_json)
        except ValueError:
            self.logger.exception('Failed to load metadata from json')
            raise StorletRuntimeException('Got invalid metadata format')

    def _wait_for_write_with_timeout(self, fd):
        """
        Wait until the write file descriptor becomes ready

        :param fd: File descriptor to write
        :raises StorletTimeout: raised when the fd does not become
                                writable within the timeout
        :raises StorletRuntimeException: raised when the fd is still not
                                         ready for writing after the wait
        """
        with StorletTimeout(self.timeout):
            r, w, e = select.select([], [fd], [])
        if fd not in w:
            raise StorletRuntimeException('Write fd is not ready')

    def _close_input_data_descriptors(self):
        fds = [self._input_data_read_fd, self._input_data_write_fd]
        self._safe_close(fds)

    def communicate(self):
        try:
            self._invoke()

            if not self.srequest.has_fd:
                self._wait_for_write_with_timeout(self._input_data_write_fd)

                # We do the writing in a different thread.
                # Otherwise, we can run into the following deadlock
                # 1. middleware writes to Storlet
                # 2. Storlet reads and starts to write metadata and then data
                # 3. middleware continues writing
                # 4. Storlet continues writing and gets stuck as middleware
                #    is busy writing, but still not consuming the reader end
                #    of the Storlet writer.
                eventlet.spawn_n(self._write_input_data,
                                 self._input_data_write_fd,
                                 self.srequest.data_iter)

            for source in self.extra_data_sources:
                # NOTE(kota_): not sure right now if using eventlet.spawn_n
                #              is the right way. Would GreenPool be better?
                #              I don't fully understand the deadlock
                #              described above.
                self._wait_for_write_with_timeout(source['write_fd'])
                eventlet.spawn_n(self._write_input_data,
                                 source['write_fd'],
                                 source['data_iter'])

            out_md = self._read_metadata()
            self._wait_for_read_with_timeout(self.data_read_fd)

            return StorletResponse(out_md, data_fd=self.data_read_fd,
                                   cancel=self._cancel)
        except Exception:
            self._close_local_side_descriptors()
            if not self.srequest.has_fd:
                self._close_input_data_descriptors()
            raise

    @contextmanager
    def _open_writer(self, fd):
        try:
            writer = os.fdopen(fd, 'w')
        except (OSError, TypeError, ValueError):
            exc_type, exc_value, exc_traceback = sys.exc_info()
            try:
                os.close(fd)
            except Exception:
                # any error on close is acceptable here; even if it
                # happens, there is nothing more we can do.
                pass
            six.reraise(exc_type, exc_value, exc_traceback)

        try:
            yield writer
        finally:
            writer.close()
            # NOTE(takashi): writer.close() also closes fd, so we don't have to
            #                close fd again.

    def _write_input_data(self, fd, data_iter):
        try:
            # double try/except block to guard against unexpected errors
            try:
                with self._open_writer(fd) as writer:
                    for chunk in data_iter:
                        with StorletTimeout(self.timeout):
                            writer.write(chunk)
            except (OSError, TypeError, ValueError):
                self.logger.exception('fdopen failed')
            except IOError:
                # this can happen on a broken pipe during writer.write
                self.logger.exception('IOError with writing fd %s' % fd)
            except StorletTimeout:
                self.logger.exception(
                    'Timeout (%ss) while writing fd %s' % (self.timeout, fd))
        except Exception:
            # _write_input_data is designed to run in an eventlet green
            # thread, so we should catch and suppress any error here
            self.logger.exception('Unexpected error at writing input data')
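
Both variants of _cancel() above use an _open_pipe() helper that the excerpt never defines. A plausible minimal definition is sketched below; this is an assumption about its shape, not the project's actual helper:

import os
from contextlib import contextmanager


@contextmanager
def _open_pipe():
    # Sketch only: yield a (read_fd, write_fd) pair and ensure both ends
    # are closed when the block exits, matching how _cancel() uses them.
    read_fd, write_fd = os.pipe()
    try:
        yield read_fd, write_fd
    finally:
        os.close(read_fd)
        os.close(write_fd)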
Example #4
class TestStorletLogger(unittest.TestCase):
    def setUp(self):
        self.log_dir = tempfile.mkdtemp()
        self.log_path = tempfile.mktemp(dir=self.log_dir)
        self.logger = StorletLogger(self.log_path)

    def tearDown(self):
        if os.path.isdir(self.log_dir):
            shutil.rmtree(self.log_dir)

    def test_open_close(self):
        self.assertIsNone(self.logger._file)
        self.logger.open()
        self.assertIsNotNone(self.logger._file)
        self.assertTrue(os.path.isfile(self.log_path))
        self.logger.close()
        self.assertIsNone(self.logger._file)

        # Make sure that log_dir is deleted
        shutil.rmtree(self.log_dir)

        # As log_dir does not exist, open should create it
        self.logger.open()
        self.assertIsNotNone(self.logger._file)
        self.assertTrue(os.path.isdir(self.log_dir))
        self.assertTrue(os.path.isfile(self.log_path))
        self.logger.close()
        self.assertIsNone(self.logger._file)

        # opened twice
        self.logger.open()
        with self.assertRaises(StorletLoggerError):
            self.logger.open()
        self.logger.close()

        # closed twice
        self.logger.open()
        self.logger.close()
        with self.assertRaises(StorletLoggerError):
            self.logger.close()

    def test_getfd(self):
        self.assertIsNone(self.logger.getfd())
        self.logger.open()
        self.assertIsNotNone(self.logger.getfd())
        self.logger.close()

    def test_getsize(self):
        self.logger.open()
        self.logger._file.write('a' * 1024)
        self.logger.close()
        self.assertEqual(1024, self.logger.getsize())
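
Taken together, these tests pin down StorletLogger's observable behaviour: open() creates the log directory on demand, a second open() or close() raises StorletLoggerError, getfd() returns a descriptor only while the file is open, and getsize() reports the file size. A minimal class satisfying exactly these tests could look as follows; it is an illustrative sketch, not the project's actual implementation:

import os


class StorletLoggerError(Exception):
    pass


class StorletLoggerSketch(object):
    # Illustrative reimplementation driven only by the tests above

    def __init__(self, path):
        self.log_path = path
        self._file = None

    def open(self):
        if self._file is not None:
            raise StorletLoggerError('Logger is already open')
        log_dir = os.path.dirname(self.log_path)
        if not os.path.exists(log_dir):
            os.makedirs(log_dir)  # recreate the log directory on demand
        self._file = open(self.log_path, 'a')

    def close(self):
        if self._file is None:
            raise StorletLoggerError('Logger is not open')
        self._file.close()
        self._file = None

    def getfd(self):
        return self._file.fileno() if self._file else None

    def getsize(self):
        return os.path.getsize(self.log_path)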