Code example #1
# Imports assumed for this snippet; `_LOGGER` is a module-level logger.
import logging
from queue import Queue  # Python 2: from Queue import Queue

_LOGGER = logging.getLogger(__name__)


class QueueHandler(logging.Handler):
    def __init__(self):
        logging.Handler.__init__(self)
        self.queue = Queue()
        self._records_lost = 0

    def _put_record(self, record):
        self.format(record)
        record.msg = record.message
        record.args = None
        record.exc_info = None
        self.queue.put_nowait(record)

    def _try_to_report_lost_records(self):
        if self._records_lost:
            try:
                record = _LOGGER.makeRecord(
                    _LOGGER.name, logging.WARNING, __file__, 0,
                    'QueueHandler has lost %s log records',
                    (self._records_lost, ), None, 'emit')
                self._put_record(record)
                self._records_lost = 0
            except Exception:
                pass

    def emit(self, record):
        try:
            self._put_record(record)
            self._try_to_report_lost_records()
        except Exception:
            self._records_lost += 1

    @property
    def records_lost(self):
        return self._records_lost
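
A minimal usage sketch for the handler above (assuming the imports noted at
the top of the snippet); draining the queue would normally happen in a
dedicated consumer thread:

handler = QueueHandler()
logger = logging.getLogger("example")
logger.addHandler(handler)
logger.warning("hello %s", "world")

# Records were pre-formatted by _put_record(), so getMessage() already
# returns the final text.
record = handler.queue.get_nowait()
print(record.getMessage())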
Code example #2
# Imports assumed for this snippet; `Worker` is a project-specific Thread
# subclass that consumes (ident, func, args, kwargs) tuples from the queue.
from collections import defaultdict
from queue import Queue  # Python 2: from Queue import Queue

import six


class ThreadPool(object):
    def __init__(self, workers=10):
        self.queue = Queue()
        self.workers = []
        self.tasks = []
        for worker in range(workers):
            self.workers.append(Worker(self.queue))

    def add(self, ident, func, args=None, kwargs=None):
        if args is None:
            args = ()
        if kwargs is None:
            kwargs = {}
        task = (ident, func, args, kwargs)
        self.tasks.append(ident)
        self.queue.put_nowait(task)

    def join(self):
        for worker in self.workers:
            worker.start()

        results = defaultdict(list)
        for worker in self.workers:
            worker.join()
            for k, v in six.iteritems(worker.results):
                results[k].extend(v)
        return results
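
A hypothetical driver for the pool above. `Worker` is not shown in the
snippet; this assumes it is a Thread subclass that pops
(ident, func, args, kwargs) tuples from the shared queue, calls func, and
collects return values into a `results` mapping keyed by ident:

pool = ThreadPool(workers=4)
for n in range(10):
    pool.add(ident=n, func=pow, args=(n, 2))

# join() starts the workers, waits for them, and merges their results.
results = pool.join()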
Code example #3
File: __init__.py  Project: andrewlukoshko/vdsm
# Imports assumed for this snippet; `_open_socket` is a vdsm-internal helper
# (not shown) that creates a new netlink socket.
from contextlib import contextmanager
from threading import BoundedSemaphore
from queue import Queue, Empty  # Python 2: from Queue import Queue, Empty


class NLSocketPool(object):
    """Pool of netlink sockets."""
    def __init__(self, size):
        if size <= 0:
            raise ValueError('Invalid socket pool size %r. Must be positive'
                             % size)
        self._semaphore = BoundedSemaphore(size)
        self._sockets = Queue(maxsize=size)

    @contextmanager
    def socket(self):
        """Returns a socket from the pool (creating it when needed)."""
        with self._semaphore:
            try:
                sock = self._sockets.get_nowait()
            except Empty:
                sock = _open_socket()
            try:
                yield sock
            finally:
                self._sockets.put_nowait(sock)
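
A usage sketch, assuming _open_socket() returns a usable netlink socket:

pool = NLSocketPool(size=5)

with pool.socket() as sock:
    # At most `size` concurrent users; sockets are created lazily and
    # always returned to the pool when the block exits.
    pass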
Code example #4
File: amqp.py  Project: kuldat/anypubsub
class AmqpSubscriber(Subscriber):
    def __init__(self, amqp_chan, exchanges):
        self.channel = amqp_chan
        self.messages = Queue(maxsize=0)
        qname, _, _ = self.channel.queue_declare()
        for exchange in exchanges:
            self.channel.queue_bind(qname, exchange)
        self.channel.basic_consume(queue=qname, callback=self.callback)

    def callback(self, msg):
        self.channel.basic_ack(msg.delivery_tag)
        self.messages.put_nowait(msg.body)

    def __iter__(self):
        return self

    def next(self):
        while self.messages.empty():
            self.channel.wait()
        return self.messages.get_nowait()

    __next__ = next   # PY3
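
A sketch of driving the subscriber with py-amqp, whose channel API this
class appears to target; the exchange name is a placeholder and the exact
connection setup varies across py-amqp versions:

import amqp

conn = amqp.Connection('localhost')
conn.connect()   # required on py-amqp >= 2.0
channel = conn.channel()

sub = AmqpSubscriber(channel, ['my_exchange'])
for body in sub:   # next() blocks in channel.wait() until a message arrives
    print(body)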
Code example #5
class KeyboardCapture(threading.Thread):
    """Implementation of KeyboardCapture for OSX."""

    _KEYBOARD_EVENTS = set([kCGEventKeyDown, kCGEventKeyUp])

    def __init__(self):
        threading.Thread.__init__(self, name="KeyboardEventTapThread")
        self._loop = None
        self._event_queue = Queue()  # Drained by event handler thread.

        self._suppressed_keys = set()
        self.key_down = lambda key: None
        self.key_up = lambda key: None

        # Returning the event means that it is passed on
        # for further processing by others.
        #
        # Returning None means that the event is intercepted.
        #
        # Delaying too long in returning appears to cause the
        # system to ignore the tap forever after
        # (https://github.com/openstenoproject/plover/issues/484#issuecomment-214743466).
        #
        # This motivates pushing callbacks to the other side
        # of a queue of received events, so that we can return
        # from this callback as soon as possible.
        def callback(proxy, event_type, event, reference):
            SUPPRESS_EVENT = None
            PASS_EVENT_THROUGH = event

            # Don't pass on meta events meant for this event tap.
            is_unexpected_event = event_type not in self._KEYBOARD_EVENTS
            if is_unexpected_event:
                if event_type == kCGEventTapDisabledByTimeout:
                    # Re-enable the tap and hope we act faster next time
                    CGEventTapEnable(self._tap, True)
                    plover.log.warning(
                        "Keystrokes may have been missed. " +
                        "Keyboard event tap has been re-enabled. ")
                return SUPPRESS_EVENT

            # Don't intercept the event if it has modifiers, allow
            # Fn and Numeric flags so we can suppress the arrow and
            # extended (home, end, etc...) keys.
            suppressible_modifiers = (kCGEventFlagMaskNumericPad
                                      | kCGEventFlagMaskSecondaryFn
                                      | kCGEventFlagMaskNonCoalesced)
            has_nonsuppressible_modifiers = \
                CGEventGetFlags(event) & ~suppressible_modifiers
            if has_nonsuppressible_modifiers and event_type == kCGEventKeyDown:
                return PASS_EVENT_THROUGH

            keycode = CGEventGetIntegerValueField(event,
                                                  kCGKeyboardEventKeycode)
            key = KEYCODE_TO_KEY.get(keycode)
            self._async_dispatch(key, event_type)
            if key in self._suppressed_keys:
                return SUPPRESS_EVENT
            return PASS_EVENT_THROUGH

        self._tap = CGEventTapCreate(
            kCGSessionEventTap, kCGHeadInsertEventTap,
            kCGEventTapOptionDefault,
            CGEventMaskBit(kCGEventKeyDown) | CGEventMaskBit(kCGEventKeyUp),
            callback, None)
        if self._tap is None:
            # Todo(hesky): See if there is a nice way to show the user what's
            # needed (or do it for them).
            raise Exception("Enable access for assistive devices.")
        CGEventTapEnable(self._tap, False)

    def run(self):
        source = CFMachPortCreateRunLoopSource(None, self._tap, 0)
        handler_thread = threading.Thread(target=self._event_handler,
                                          name="KeyEventDispatcher")
        handler_thread.start()
        self._loop = CFRunLoopGetCurrent()
        CFRunLoopAddSource(self._loop, source, kCFRunLoopCommonModes)
        CGEventTapEnable(self._tap, True)

        CFRunLoopRun()

        # Wake up event handler.
        self._event_queue.put_nowait(None)
        handler_thread.join()
        CFMachPortInvalidate(self._tap)
        CFRelease(self._tap)
        CFRunLoopSourceInvalidate(source)

    def cancel(self):
        CFRunLoopStop(self._loop)
        self.join()
        self._loop = None

    def suppress_keyboard(self, suppressed_keys=()):
        self._suppressed_keys = set(suppressed_keys)

    def _async_dispatch(self, key, event_type):
        """
        Dispatches a key string in KEYCODE_TO_KEY.values() and a CGEventType
        to the appropriate KeyboardCapture callback
        without blocking execution of its caller.
        """
        if key is None:
            return

        is_keyup = event_type == kCGEventKeyUp
        pair = (key, is_keyup)
        self._event_queue.put_nowait(pair)

    def _event_handler(self):
        """
        Event dispatching thread launched during run().
        Loops until None is received from _event_queue.
        Avoids busy-waiting by blocking on _event_queue.

        In normal operation, it gets a pair of
        (key_string, is_keyup_bool) from _event_queue
        and routes the string to self.key_up or self.key_down,
        then waits for a new pair to arrive.
        """
        while True:
            pair = self._event_queue.get(block=True, timeout=None)
            if pair is None:
                return

            key, is_keyup = pair
            handler = self.key_up if is_keyup else self.key_down
            handler(key)
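
A sketch of wiring up the capture thread. This is macOS-only and assumes
the Quartz event-tap bindings imported by the original module; the key
names are placeholders:

capture = KeyboardCapture()
capture.key_down = lambda key: print('down:', key)
capture.key_up = lambda key: print('up:', key)
capture.suppress_keyboard({'a', 's'})  # swallow these keys system-wide
capture.start()    # spawns the tap thread, which runs the CFRunLoop
# ... later:
capture.cancel()   # stops the run loop and joins the thread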
Code example #6
File: __init__.py  Project: andrewlukoshko/vdsm
class JsonRpcServer(object):
    log = logging.getLogger("jsonrpc.JsonRpcServer")

    FILTERED_METHODS = frozenset(['Host.getAllVmStats'])

    """
    Creates new JsonrRpcServer by providing a bridge, timeout in seconds
    which defining how often we should log connections stats and thread
    factory.
    """
    def __init__(self, bridge, timeout, cif, threadFactory=None):
        self._bridge = bridge
        self._cif = cif
        self._workQueue = Queue()
        self._threadFactory = threadFactory
        self._timeout = timeout
        self._next_report = monotonic_time() + self._timeout
        self._counter = 0

    def queueRequest(self, req):
        self._workQueue.put_nowait(req)

    """
    Aggregates number of requests received by vdsm. Each request from
    a batch is added separately. After time defined by timeout we log
    number of requests.
    """
    def _attempt_log_stats(self):
        self._counter += 1
        if monotonic_time() > self._next_report:
            self.log.info('%s requests processed during %s seconds',
                          self._counter, self._timeout)
            self._next_report += self._timeout
            self._counter = 0

    def _serveRequest(self, ctx, req):
        self._attempt_log_stats()
        logLevel = logging.DEBUG
        suppress_logging = req.method in self.FILTERED_METHODS

        # VDSM should never respond to any request before all information about
        # running VMs is recovered, see https://bugzilla.redhat.com/1339291
        if not self._cif.ready:
            self.log.info("In recovery, ignoring '%s' in bridge with %s",
                          req.method, req.params)
            # TODO: take the response from the exception instead of via errCode
            ctx.requestDone(JsonRpcResponse(errCode['recovery'], None, req.id))
            return

        self.log.log(logLevel, "Calling '%s' in bridge with %s",
                     req.method, req.params)
        try:
            method = self._bridge.dispatch(req.method)
        except JsonRpcMethodNotFoundError as e:
            if req.isNotification():
                return

            ctx.requestDone(JsonRpcResponse(None, e, req.id))
            return

        try:
            params = req.params
            self._bridge.register_server_address(ctx.server_address)
            if isinstance(req.params, list):
                res = method(*params)
            else:
                res = method(**params)
            self._bridge.unregister_server_address()
        except JsonRpcError as e:
            ctx.requestDone(JsonRpcResponse(None, e, req.id))
        except Exception as e:
            self.log.exception("Internal server error")
            ctx.requestDone(JsonRpcResponse(None,
                                            JsonRpcInternalError(str(e)),
                                            req.id))
        else:
            res = True if res is None else res
            log_res = "(suppressed)" if suppress_logging else res
            self.log.log(logLevel, "Return '%s' in bridge with %s",
                         req.method, log_res)
            ctx.requestDone(JsonRpcResponse(res, None, req.id))

    @traceback(on=log.name)
    def serve_requests(self):
        while True:
            obj = self._workQueue.get()
            if obj is None:
                break

            client, server_address, msg = obj
            self._parseMessage(client, server_address, msg)

    def _parseMessage(self, client, server_address, msg):
        ctx = _JsonRpcServeRequestContext(client, server_address)

        try:
            rawRequests = json.loads(msg)
        except:
            ctx.addResponse(JsonRpcResponse(None, JsonRpcParseError(), None))
            ctx.sendReply()
            return

        if isinstance(rawRequests, list):
            # Empty batch request
            if len(rawRequests) == 0:
                ctx.addResponse(
                    JsonRpcResponse(None,
                                    JsonRpcInvalidRequestError(
                                        'request batch is empty',
                                        rawRequests),
                                    None))
                ctx.sendReply()
                return
        else:
            # From this point on we know it's always a list
            rawRequests = [rawRequests]

        # JSON Parsed handling each request
        requests = []
        for rawRequest in rawRequests:
            try:
                req = JsonRpcRequest.fromRawObject(rawRequest)
                requests.append(req)
            except JsonRpcError as err:
                ctx.addResponse(JsonRpcResponse(None, err, None))
            except:
                ctx.addResponse(JsonRpcResponse(None,
                                                JsonRpcInternalError(),
                                                None))

        ctx.setRequests(requests)

        # No request was built successfully, or there are only notifications
        if ctx.counter == 0:
            ctx.sendReply()

        for request in requests:
            self._runRequest(ctx, request)

    def _runRequest(self, ctx, request):
        if self._threadFactory is None:
            self._serveRequest(ctx, request)
        else:
            try:
                self._threadFactory(partial(self._serveRequest, ctx, request))
            except Exception as e:
                self.log.exception("could not allocate request thread")
                ctx.requestDone(
                    JsonRpcResponse(
                        None,
                        JsonRpcInternalError(
                            str(e)
                        ),
                        request.id
                    )
                )

    def stop(self):
        self.log.info("Stopping JsonRPC Server")
        self._workQueue.put_nowait(None)
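
A lifecycle sketch for the server above. `bridge`, `cif`, `client` and the
thread factory are vdsm internals, so they are placeholders here:

import threading

server = JsonRpcServer(bridge, timeout=60, cif=cif, threadFactory=None)

t = threading.Thread(target=server.serve_requests)
t.start()

# Producers enqueue (client, server_address, msg) tuples:
server.queueRequest((client, server_address, raw_json_message))

server.stop()   # puts the None sentinel; serve_requests() returns
t.join()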
Code example #7
class Manager:
    """
        Attribute:
            open_handle: A open-handle for handler.

            max_task: Specify the upper limit of running task at the same time.

            open_args: opener default args.

            open_kwargs: opener default kwargs.

            daemon: Default make client thread to be daemon thread if True.

    """
    def __init__(self,
                 max_task,
                 open_handle,
                 open_args=(),
                 open_kwargs=None,
                 daemon=False):
        self._full_queue = []
        self._queue = TaskQueue(unopened=[],
                                opening=[],
                                ready=[],
                                queue=[],
                                running=[],
                                paused=[],
                                finished=[],
                                failed=[])
        self._open_handle = open_handle
        self._max_task = max_task
        if not open_kwargs:
            open_kwargs = {}

        self._open_args = open_args
        self._open_kwargs = open_kwargs

        self._process_mgr = None
        self._thread_queue = ThreadQueue()
        self._process_queue = None

        self._thread_collector = ThreadCollector(daemon)

        self._queue_started = False

        self._queue_lock = Lock()

        self._process_collector = {}

        self._closed = False

    def putrequest(self, request, enqueue=True):
        """ Put a Request into Manager queue.
            Return task id corresponding to the request.
        """
        # Only a request that runs in child-process mode needs the
        # process queue.
        if request.child_process and not self._process_queue:
            raise ChildProcessDisable('cannot put a request that runs in '
                                      'child-process mode without first '
                                      'enabling child-process mode.')
        if request in self._full_queue:
            raise ValueError('request is already in the task queue.')
        index = len(self._full_queue)
        wrapper = RequestWrapper(id=index,
                                 source=request,
                                 callback=ThreadQueue(),
                                 child_process=request.child_process)
        self._full_queue.append(wrapper)
        self._queue.unopened.append(index)
        if enqueue:
            self.enqueue(index)

        return index

    def enable_child_process_mode(self):
        if not self._process_mgr:
            self._process_mgr = ProcessManager()
            self._process_queue = self._process_mgr.Queue()
            self._process_collector[-1] = self._process_mgr._process

    def remove(self, task_id):
        """ Remove task by task id. """
        self._border_check(task_id)
        self._full_queue[task_id] = None

    def puthandler(self, handler):
        """ Put a Handler into Manager queue.
            Return task id corresponding to the handler.

            Warning: Not supported in child_process mode.
        """
        if handler in self._full_queue:
            raise ValueError('handler is already in the task queue.')
        index = len(self._full_queue)
        wrapper = HandlerWrapper(id=index, source=handler)
        self._full_queue.append(wrapper)
        handler.install_external_callback_queue(
            SignalQueue(id=index, queue=self._thread_queue))
        self._queue.ready.append(index)
        return index

    def get(self, task_id):
        """ Return HandlerWrapper of the task specified by id.

            Warning: this method is not supported when running in
            child-process mode.
        """
        return self._full_queue[task_id]

    def process_info(self):
        infos = {}
        for k, v in self._process_collector.items():
            infos[k] = ProcessInfo(name=v.name, pid=v.pid, ident=v.ident)
        return infos

    def start_all(self):
        for i in self._queue.ready:
            self.start(i)
        for i in self._queue.paused:
            self.start(i)

    def get_all(self):
        for i in self._full_queue:
            if i is not None:
                yield i

    def enqueue(self, task_id):
        """ Make task into the plan queue.

            When queue is started, manager would run the tasks
            in the queue automatically until queue is empty.
        """
        self._border_check(task_id)
        with self._queue_lock:
            if task_id in self._queue.finished:
                raise RuntimeError('cannot queue a finished task.')
            self.__remove_queue_task(task_id)
            self._queue.queue.append(task_id)

    def getinstspeed(self, task_id=None):
        if task_id is not None:
            if not self._opened_info_checking(task_id):
                return 0
            return self._full_queue[task_id].source.getinstspeed()
        else:
            speed = 0
            for i in self._queue.running:
                speed += self._full_queue[i].source.getinstspeed()
            return speed

    def getavgspeed(self, task_id=None):
        if task_id is not None:
            if not self._opened_info_checking(task_id):
                return 0
            return self._full_queue[task_id].source.getavgspeed()
        else:
            speed = 0
            for i in self._queue.running:
                speed += self._full_queue[i].source.getavgspeed()
            return speed

    def getincbyte(self, task_id=None):
        if task_id is not None:
            if not self._opened_info_checking(task_id):
                return 0
            return self._full_queue[task_id].source.get_go_inc()
        else:
            incbyte = 0
            for i in self._queue.running:
                incbyte += self._full_queue[i].source.get_go_inc()
            return incbyte

    def get_remain_time(self, task_id=None):
        if task_id is not None:
            if not self._opened_info_checking(task_id):
                return float('inf')
            return self._full_queue[task_id].source.get_time_left()
        else:
            remain = 0
            for i in self._queue.running:
                remain += self._full_queue[i].source.get_time_left()
            return remain

    def get_remain_byte(self, task_id=None):
        if task_id is not None:
            if not self._opened_info_checking(task_id):
                return float('inf')
            return self._full_queue[task_id].source.get_byte_left()
        else:
            remain = 0
            for i in self._queue.running:
                remain += self._full_queue[i].source.get_byte_left()
            return remain

    def getfileinfo(self, task_id):
        if not self._opened_info_checking(task_id):
            return None
        return self._full_queue[task_id].source.getfileinfo()

    def geturlinfo(self, task_id):
        if not self._opened_info_checking(task_id):
            return None
        return self._full_queue[task_id].source.geturlinfo_all()

    def start_queue(self):
        """ Start running plan queue.

            The max number of running task is limited by self._max_task.
        """
        self._queue_started = True
        cur_task_len = len(self._queue.running)
        cur_task_len += len(self._queue.running)
        if cur_task_len < self._max_task:
            for i in range(self._max_task - cur_task_len):
                if self._queue.queue:
                    task_id = self._queue.queue[0]
                    self.start(task_id)
                else:
                    self.stop_queue()
                    break

    def stop_queue(self):
        """ Stop running plan queue.

            This method does not stop the running/opening task.
        """
        self._queue_started = False

    def start(self, task_id):
        self._border_check(task_id)

        if type(self._full_queue[task_id]) is RequestWrapper:
            self._full_queue[task_id].after(self.start,
                                            args=(task_id, ),
                                            force=True)
            self.__remove_queue_task(task_id)
            self._queue.opening.append(task_id)
            self.open(task_id, block=False)
        else:
            self._full_queue[task_id].source.start()
            self._send_ctrl_signal(SIGNAL_TASK_START, task_id)

        self._check_controller_state()

    def resume(self, task_id):
        """ Resume the failed task.

            Only make failed task out of failed queue, cannot reopen/rerun the task.
        """
        if task_id not in self._queue.failed:
            raise RuntimeError('task <%d> is no need to resume.' % task_id)

        self._queue.failed.remove(task_id)
        if type(self._full_queue[task_id]) is RequestWrapper:
            self._queue.unopened.append(task_id)
        else:
            self._queue.ready.append(task_id)

    def pause(self, task_id):
        self._border_check(task_id)
        self._send_ctrl_signal(SIGNAL_TASK_PAUSE, task_id)

    stop = pause

    def pause_all(self, *, block=True):
        for i in self._queue.running:
            self.pause(i)
        for i in self._thread_collector.get_group(GROUP_OPEN_CTRL):
            task_id = i.owner
            self.pause(task_id)

        if block:
            for i in self._queue.running:
                self._full_queue[i].source.join()

    stop_all = pause_all

    def open(self, task_id, *, block=True):
        self._border_check(task_id)
        self._check_controller_state()

        if type(self._full_queue[task_id]) is not RequestWrapper:
            raise TypeError('opening task should be a type of request.')

        if block:
            return self._open(task_id)
        else:
            self._thread_collector.put(self._open,
                                       GROUP_OPEN,
                                       args=(task_id, ),
                                       owner=task_id).start()

    def open_all(self, *, sequential=False, block=True):
        for i in self._queue.unopened:
            self.open(i, block=sequential)

        if block:
            self._thread_collector.wait(GROUP_OPEN)

    def is_all_finish(self):
        if not self.is_idle():
            return False
        if self._queue.paused or self._queue.failed or self._queue.unopened:
            return False
        return True

    def is_idle(self):
        """ Return True if all running and opening task are over. """
        if self._thread_collector.get_all():
            return False
        if self._queue.running:
            return False
        return True

    def get_queue(self):
        return tuple(self._queue.queue)

    def get_unopened(self):
        return tuple(self._queue.unopened)

    def get_finished(self):
        return tuple(self._queue.finished)

    def get_running(self):
        return tuple(self._queue.running)

    def get_failed(self):
        return tuple(self._queue.failed)

    def get_paused(self):
        return tuple(self._queue.paused)

    def is_finish(self, task_id):
        self._border_check(task_id)
        return self._full_queue[task_id].source.is_finished()

    def close(self):
        if not self.is_idle():
            raise RuntimeError('cannot close a running manager.')

        if self._closed:
            raise RuntimeError('manager has already closed.')

        if self._process_mgr:
            self._process_mgr.shutdown()
            self._process_mgr = None
            del self._process_collector[-1]
        for k, v in self._process_collector.items():
            if not v._close:
                v.terminate()
        self._closed = True

    def _opening_control_thread(self, task_id, handler):
        callback = self._full_queue[task_id].callback
        while True:
            signal = callback.get()
            if signal.id == ID_TASK_OPEN:
                self._full_queue[task_id] = signal.content
                self._send_ctrl_signal(SIGNAL_TASK_OPEN, task_id)
                callback.put_nowait(SIGNAL_CALLBACK_END)
            elif signal.id == ID_TASK_PAUSE:
                handler.pause(block=False)
                self._send_ctrl_signal(SIGNAL_TASK_PAUSE, task_id)
                callback.put_nowait(SIGNAL_CALLBACK_END)
            elif signal.id == ID_TASK_FAIL:
                self._send_ctrl_signal(SIGNAL_TASK_FAIL, task_id,
                                       signal.content)
                callback.put_nowait(SIGNAL_CALLBACK_END)
            elif signal.id == ID_CALLBACK_END:
                while not callback.empty():
                    callback.get_nowait()
                    callback.task_done()
                break
            callback.task_done()

        callback.task_done()

    def _check_controller_state(self):
        if not self._thread_collector.get_group(GROUP_CTRL_T):
            self._thread_collector.put(self._manager_control_thread,
                                       GROUP_CTRL_T,
                                       args=(self._thread_queue, ),
                                       owner=self._thread_queue).start()
        if not self._thread_collector.get_group(GROUP_CTRL_P):
            self._thread_collector.put(self._manager_control_thread,
                                       GROUP_CTRL_P,
                                       args=(self._process_queue, ),
                                       owner=self._process_queue).start()

    def _manager_control_thread(self, queue):
        while True:
            signal = queue.get()
            task_id = signal.content.id
            content = signal.content.content
            if signal.id == ID_TASK_FINISH:
                self.__finished_task_handler(task_id)
            elif signal.id == ID_TASK_OPEN:
                self.__opened_task_handler(task_id)
            elif signal.id in (ID_TASK_EXCEPTION, ID_TASK_FAIL):
                self.__failed_task_handler(task_id, content)
            elif signal.id == ID_TASK_STOP:
                self.__stopped_task_handler(task_id)
            elif signal.id == ID_TASK_PAUSE:
                self.__paused_task_handler(signal.content)
            elif signal.id == ID_TASK_START:
                self.__started_task_handler(task_id)
            elif signal.id == ID_CALLBACK_END:
                break

            self._do_next(task_id)
            queue.task_done()
            self._idle_checking()

        queue.task_done()

    def _border_check(self, task_id):
        if (task_id >= len(self._full_queue)
                or self._full_queue[task_id] is None):
            raise IndexError('task id is invalid.')

    def _open(self, task_id):
        wrapper = None
        dlreq_wrapper = self._full_queue[task_id]
        handler = self._open_handle(child_process=dlreq_wrapper.child_process,
                                    **self._open_kwargs)
        if dlreq_wrapper.child_process:
            self._process_collector[task_id] = handler._process
        open_callback = dlreq_wrapper.callback
        self._thread_collector.put(self._opening_control_thread,
                                   GROUP_OPEN_CTRL,
                                   args=(task_id, handler),
                                   owner=task_id).start()
        try:
            handler.open(dlreq_wrapper.source)
        except (HandlerError, URLError, ClientError) as e:
            open_callback.put_nowait(
                SIGNAL_TASK_FAIL(Signal(id=task_id, content=e)))
        except Exception as e:
            open_callback.put_nowait(
                SIGNAL_TASK_FAIL(Signal(id=task_id, content=e)))
            raise
        else:

            if handler.is_opened():
                if dlreq_wrapper.child_process:
                    queue = self._process_queue
                else:
                    queue = self._thread_queue

                wrapper = HandlerWrapper(
                    id=task_id,
                    source=handler,
                    child_process=dlreq_wrapper.child_process)
                handler.install_external_callback_queue(
                    SignalQueue(id=task_id, queue=queue))
                wrapper.move_from(dlreq_wrapper)
                open_callback.put_nowait(SIGNAL_TASK_OPEN(content=wrapper))

            else:
                open_callback.put_nowait(SIGNAL_TASK_PAUSE(Signal(id=task_id)))

        return wrapper

    def _pause_opening(self, task_id):
        flag = False
        open_threads = self._thread_collector.get_group(GROUP_OPEN_CTRL)
        for i in open_threads:
            if i.owner == task_id:
                flag = True
        if flag:
            wrapper = self._full_queue[task_id]
            if type(wrapper) is RequestWrapper:
                wrapper.callback.put_nowait(SIGNAL_TASK_PAUSE())

    def _opened_info_checking(self, task_id):
        return type(self._full_queue[task_id]) is not RequestWrapper

    def _send_ctrl_signal(self, signal_wrapper, task_id, content=None):
        signal = signal_wrapper(Signal(id=task_id, content=content))
        if self._full_queue[task_id].child_process:
            self._process_queue.put_nowait(signal)
        else:
            self._thread_queue.put_nowait(signal)

    def _close_controller(self):
        if self._thread_collector.get_group(GROUP_CTRL_T):
            self._thread_queue.put_nowait(SIGNAL_CALLBACK_END)
        if self._thread_collector.get_group(GROUP_CTRL_P):
            self._process_queue.put_nowait(SIGNAL_CALLBACK_END)

    def _idle_checking(self):
        if not self._queue.opening and not self._queue.running and \
                (not self._queue_started or not self._queue.queue):
            self._close_controller()

    def _do_next(self, task_id):
        next_task = self._full_queue[task_id].get_next()
        if next_task:
            self._thread_collector.put(next_task.handler,
                                       GROUP_DO_NEXT,
                                       args=next_task.args,
                                       kwargs=next_task.kwargs,
                                       owner=task_id).start()

    def __remove_queue_task(self, task_id):
        if task_id in self._queue.running:
            self._queue.running.remove(task_id)
        elif task_id in self._queue.queue:
            self._queue.queue.remove(task_id)
        elif task_id in self._queue.paused:
            self._queue.paused.remove(task_id)
        elif task_id in self._queue.ready:
            self._queue.ready.remove(task_id)
        elif task_id in self._queue.unopened:
            self._queue.unopened.remove(task_id)
        elif task_id in self._queue.failed:
            self._queue.failed.remove(task_id)
        elif task_id in self._queue.finished:
            self._queue.finished.remove(task_id)
        elif task_id in self._queue.opening:
            self._queue.opening.remove(task_id)

    def __paused_task_handler(self, signal):
        task_id = signal.id
        if task_id in self._queue.running:
            self._full_queue[task_id].source.pause(block=False)
        elif task_id in self._queue.unopened:
            self._pause_opening(task_id)

        if task_id in self._queue.running:
            self._queue.running.remove(task_id)
            self._queue.paused.append(task_id)

        if self._queue_started:
            self.start_queue()

    def __check_process_collector(self, task_id):
        if task_id in self._process_collector:
            del self._process_collector[task_id]

    def __opened_task_handler(self, task_id):
        self.__remove_queue_task(task_id)
        self._queue.ready.append(task_id)

    def __finished_task_handler(self, task_id):
        self.__remove_queue_task(task_id)
        self._queue.finished.append(task_id)

        self._full_queue[task_id].source.close()
        self.__check_process_collector(task_id)

        if self._queue_started:
            self.start_queue()

    def __failed_task_handler(self, task_id, exception):
        self.__remove_queue_task(task_id)
        self._full_queue[task_id].fail(exception)
        self._queue.failed.append(task_id)
        self._full_queue[task_id].reset()

        self.__check_process_collector(task_id)

        if self._queue_started:
            self.start_queue()

    def __stopped_task_handler(self, task_id):
        pass

    def __started_task_handler(self, task_id):
        self.__remove_queue_task(task_id)
        self._queue.running.append(task_id)
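
A heavily hedged lifecycle sketch for the manager above; the handler
factory and request object come from the surrounding project and are
placeholders here:

import time

manager = Manager(max_task=2, open_handle=MyHandler)  # MyHandler: placeholder
manager.enable_child_process_mode()  # needed for child-process requests

task_id = manager.putrequest(request)  # `request`: placeholder Request
manager.start_queue()                  # run up to max_task tasks at once

while not manager.is_all_finish():
    time.sleep(1)
manager.close()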
Code example #8
class BasicSpider:
    def __init__(self, max_task, url_page_format, max_page=1000, start_page=1):
        self.next_page = start_page
        self.start_page = start_page
        self.max_task = max_task
        self.max_page = max_page
        self._queue = Queue()
        self.url_page_format = url_page_format
        self.page_number_data = [None for _ in range(max_page)]

    def _task_controller(self):
        task_done_counter = 0
        while True:
            page, _ = self._queue.get()
            self._queue.task_done()
            if page is not None:
                task_done_counter += 1

            print(page, task_done_counter)
            if task_done_counter % 50 == 0:
                self.save_addup_data('./data/%s-%s.txt' %
                                     (__name__, time.time()))
            if task_done_counter + self.start_page - 1 >= self.max_page:
                self.save_addup_data('./data/%s-%s.txt' %
                                     (__name__, time.time()))
                print(self.get_total())
                break
            else:
                if self.next_page <= self.max_page:
                    print('NEXT_PAGE: %d' % self.next_page)
                    self._run_next_page()

    def _spider_handler(self, page, **kwargs):
        while not self._addup_wrapper(page, **kwargs):
            pass
        self._queue.put_nowait((page, kwargs))

    def _run_next_page(self, kwargs=None):
        if self.next_page > self.max_page:
            return
        while self.page_number_data[self.next_page - 1] is not None:
            self.next_page += 1
            if self.next_page > self.max_page:
                self._queue.put_nowait((None, kwargs or {}))
                return
        threading.Thread(target=self._spider_handler,
                         args=(self.next_page, ),
                         kwargs=kwargs).start()
        self.next_page += 1

    def run(self):
        for i in range(self.max_task):
            self._run_next_page()
            time.sleep(0.1)

        ctrl = threading.Thread(target=self._task_controller,
                                name='task_controller')
        ctrl.start()

        # keep the main thread alive for inspection
        while True:
            time.sleep(1)

    def _addup_wrapper(self, page, **kwargs):
        try:
            self.get_page_addup(page, **kwargs)
        except Exception as e:
            print('===========error')
            print(page)
            print(e)
            # print_exc()
            print('===========error')
        else:
            return True
        return False

    def save_addup_data(self, name):
        tl = [str(i) for i in self.page_number_data]
        with open(name, 'w') as f:
            f.write('\n'.join(tl))

    def get_bs4(self, page):
        req = requests.get(self.url_page_format % page, headers=HEADERS)
        text = req.text
        bs4 = BeautifulSoup(text, 'lxml')
        return bs4

    def get_page_addup(self, page, **kwargs):
        return

    def get_total(self):
        if list(self.check_page_number()):
            raise ValueError('addup data is incomplete')

        return sum(self.page_number_data)

    def check_page_number(self):
        for i, v in enumerate(self.page_number_data):
            if v is None:
                yield i

    def load_page_data(self, filepath):
        self.page_number_data = []
        with open(filepath, 'r') as f:
            data_str = f.read()
        for i in data_str.split('\n'):
            if i:
                if i.strip() == 'None' or i.strip() == '0':
                    self.page_number_data.append(None)
                else:
                    self.page_number_data.append(int(i.strip()))
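
get_page_addup() above is a stub; a subclass is expected to fill
page_number_data. A hypothetical subclass (HEADERS is a module-level
constant in the original, not shown):

class CountSpider(BasicSpider):
    def get_page_addup(self, page, **kwargs):
        bs4 = self.get_bs4(page)
        # Hypothetical: record the number of list items on the page.
        self.page_number_data[page - 1] = len(bs4.find_all('li'))

spider = CountSpider(max_task=4,
                     url_page_format='https://example.com/list?page=%d',
                     max_page=100)
spider.run()   # blocks; addup data is saved under ./data/ as pages finish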
Code example #9
File: model.py  Project: yili9111/nextgisweb
class TilestorWriter:
    __instance = None

    def __init__(self):
        if TilestorWriter.__instance is None:
            self.queue = Queue(maxsize=QUEUE_MAX_SIZE)
            self.cstart = None

            self._worker = Thread(target=self._job)
            self._worker.daemon = True
            self._worker.start()

    @classmethod
    def getInstance(cls):
        if cls.__instance is None:
            cls.__instance = TilestorWriter()
        return cls.__instance

    def put(self, payload):
        cstart = self.cstart
        if cstart is not None:
            cdelta = time() - cstart
            if cdelta > QUEUE_STUCK_TIMEOUT:
                raise TileWriterQueueStuckException(
                    "Tile writer queue is stuck for {} seconds.".format(
                        cdelta))

        try:
            self.queue.put_nowait(payload)
        except Full:
            raise TileWriterQueueFullException(
                "Tile writer queue is full at maxsize {}.".format(
                    self.queue.maxsize))

    def _job(self):
        while True:
            self.cstart = None
            data = self.queue.get()
            self.cstart = time()

            # The tile cache writer may fail sometimes, in case of a
            # database connection problem for example. So we just skip the
            # tile with an error and log the exception.
            try:

                z, x, y = data['tile']
                tstamp = int(
                    (datetime.utcnow() - TIMESTAMP_EPOCH).total_seconds())

                img = data['img']

                colortuple = imgcolor(img)
                color = pack_color(
                    colortuple) if colortuple is not None else None

                with transaction.manager:
                    conn = DBSession.connection()
                    conn.execute(db.sql.text(
                        'DELETE FROM tile_cache."{0}" WHERE z = :z AND x = :x AND y = :y; '
                        'INSERT INTO tile_cache."{0}" (z, x, y, color, tstamp) '
                        'VALUES (:z, :x, :y, :color, :tstamp)'.format(
                            data['uuid'])),
                                 z=z,
                                 x=x,
                                 y=y,
                                 color=color,
                                 tstamp=tstamp)

                    # Force zope session management to commit changes
                    mark_changed(DBSession())

                if color is None:
                    buf = BytesIO()
                    img.save(buf, format='PNG', compress_level=3)

                    tilestor = get_tile_db(data['db_path'])
                    tilestor.execute(
                        "DELETE FROM tile WHERE z = ? AND x = ? AND y = ?",
                        (z, x, y))

                    try:
                        tilestor.execute(
                            "INSERT INTO tile VALUES (?, ?, ?, ?, ?)",
                            (z, x, y, tstamp, buf.getvalue()))
                    except sqlite3.IntegrityError:
                        # NOTE: A race condition with another process may
                        # occur here.
                        # TODO: ON CONFLICT DO ... in SQLite >= 3.24.0 (python 3)
                        pass

            except Exception as exc:
                # `exc.message` is Python 2 only; log the exception itself.
                _logger.exception("Uncaught exception in tile writer: %s",
                                  exc)

            if 'answer_queue' in data:
                data['answer_queue'].put_nowait(None)
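
A usage sketch; the payload keys are the ones read by _job() above, and
the values are placeholders:

from queue import Queue

writer = TilestorWriter.getInstance()

answer = Queue()
writer.put({
    'tile': (0, 0, 0),        # (z, x, y) tile coordinates
    'img': img,               # PIL image to store (placeholder)
    'uuid': layer_uuid,       # tile_cache table name (placeholder)
    'db_path': db_path,       # SQLite tile store path (placeholder)
    'answer_queue': answer,   # optional: signals when the write completes
})
answer.get()                  # block until the worker has written the tile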
Code example #10
File: logger.py  Project: uber/clay
class TCPHandler(logging.Handler):
    '''
    Python logging handler for sending JSON formatted messages over
    TCP, optionally wrapping the connection with TLSv1
    '''
    def __init__(self, host, port, ssl_ca_file=None):
        '''
        Instantiate a TCPHandler with the intent of connecting to the
        given host (string) and port (int) with or without using SSL/TLSv1
        '''
        logging.Handler.__init__(self)
        self.host = host
        self.port = port
        self.ssl_ca_file = ssl_ca_file
        self.sock = None
        self.queue = Queue(LOG_QUEUE_SIZE)
        self.connect_wait = BACKOFF_INITIAL
        self.raiseExceptions = 0

        self.hostname = socket.gethostname()
        if self.hostname.find('.') != -1:
            self.hostname = self.hostname.split('.', 1)[0]

        self.sender = threading.Thread(target=self.run)
        self.sender.setDaemon(True)
        self.sender.start()

    def connect(self):
        '''
        Create a connection with the server, sleeping for some
        period of time if connection errors have occurred recently.
        '''
        self.sock = socket.socket()
        if self.ssl_ca_file:
            self.sock = ssl.wrap_socket(self.sock,
                ssl_version=ssl.PROTOCOL_TLSv1,
                cert_reqs=ssl.CERT_REQUIRED,
                ca_certs=self.ssl_ca_file)

        INTERNAL_LOG.debug('Connecting (backoff: %.03f)' %
            self.connect_wait)
        time.sleep(self.connect_wait)
        self.sock.connect((self.host, self.port))

    def jsonify(self, record):
        '''
        Translate a LogRecord instance into a json_event
        '''
        timestamp = datetime.utcfromtimestamp(record.created)
        timestamp = timestamp.isoformat()

        fields = {
            'level': record.levelname,
            'filename': record.pathname,
            'lineno': record.lineno,
            'method': record.funcName,
        }
        if record.exc_info:
            fields['exception'] = str(record.exc_info)
            # traceback.format_exc() takes no exc_info argument; build the
            # traceback from the (type, value, tb) tuple instead.
            fields['traceback'] = ''.join(format_exception(*record.exc_info))

        log = {
            '@source_host': self.hostname,
            '@timestamp': timestamp,
            '@tags': [record.name],
            '@message': record.getMessage(),
            '@fields': fields,
        }
        return json.dumps(log)

    def emit(self, record):
        '''
        Send a LogRecord object formatted as json_event via a
        queue and worker thread.
        '''
        self.queue.put_nowait(record)

    def run(self):
        '''
        Main loop of the logger thread. All network I/O and exception handling
        originates here. Strings are consumed from self.queue and sent to
        self.sock, creating a new connection if necessary.

        If any exceptions are caught, the message is put() back on the queue
        and the exception is allowed to propagate up through
        logging.Handler.handleError(), potentially causing this thread to abort.
        '''
        INTERNAL_LOG.debug('Log I/O thread started')
        while True:
            record = self.queue.get()
            if record is None:
                break

            jsonrecord = self.jsonify(record)
            jsonrecord = '%s\n' % jsonrecord

            try:
                if self.sock is None:
                    self.connect()
                self.send(jsonrecord)
            except Exception:
                # This exception will be silently ignored and the message
                # requeued unless self.raiseExceptions=1
                self.queue.put(record)
                self.handleError(record)
            self.queue.task_done()
        INTERNAL_LOG.debug('Log I/O thread exited cleanly')

    def send(self, data):
        '''
        Keep calling SSLSocket.write until the entire message has been sent
        '''
        while len(data) > 0:
            if self.ssl_ca_file:
                sent = self.sock.write(data)
            else:
                sent = self.sock.send(data)
            data = data[sent:]
        self.connect_wait = BACKOFF_INITIAL

    def handleError(self, record):
        '''
        If an error occurs trying to send the log message, close the connection
        and delegate the exception handling to the superclass' handleError,
        which raises the exception (potentially killing the log thread) unless
        self.raiseExceptions is False.
        http://hg.python.org/cpython/file/e64d4518b23c/Lib/logging/__init__.py#l797
        '''
        INTERNAL_LOG.exception('Unable to send log')
        self.cleanup()
        self.connect_wait *= BACKOFF_MULTIPLE
        logging.Handler.handleError(self, record)

    def cleanup(self):
        '''
        If the socket to the server is still open, close it. Otherwise, do
        nothing.
        '''
        if self.sock:
            INTERNAL_LOG.info('Closing socket')
            self.sock.close()
            self.sock = None

    def close(self):
        '''
        Send a sentinel None object to the worker thread, telling it to exit
        and disconnect from the server.
        '''
        self.queue.put(None)
        self.cleanup()
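
A usage sketch; the endpoint is a placeholder:

import logging

handler = TCPHandler('logs.example.com', 5170)
logging.getLogger().addHandler(handler)
logging.getLogger('myapp').warning('service started')  # shipped as JSON

handler.close()   # sends the None sentinel; the I/O thread exits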
Code example #11
class eptWorkerFabric(object):
    """ tracks cache and settings for each fabric actively being monitored, also provides useful
        notification and push_event functions
    """
    def __init__(self, fabric):
        self.fabric = fabric
        self.start_ts = time.time()
        self.settings = eptSettings.load(fabric=fabric, settings="default")
        self.cache = eptCache(fabric)
        self.dns_cache = DNSCache()
        self.db = get_db()
        self.watcher_paused = False
        self.session = None
        self.notify_queue = None
        self.notify_thread = None
        self.init()

    def init(self):
        """ initialize settings after fabric settings as been loaded """
        # epm parser used with eptWorker for creating pseudo eevents
        self.ept_epm_parser = eptEpmEventParser(self.fabric,
                                                self.settings.overlay_vnid)
        # one time calculation for email address and syslog server (which requires valid port)
        self.email_address = self.settings.email_address
        self.syslog_server = self.settings.syslog_server
        self.syslog_port = self.settings.syslog_port
        if len(self.email_address) == 0:
            self.email_address = None
        if len(self.syslog_server) == 0:
            self.syslog_server = None
            self.syslog_port = None

    def close(self):
        """ stateful close when worker receives FABRIC_STOP for this fabric """
        if self.db is not None:
            self.db.client.close()
        if self.session is not None:
            self.session.close()
        if self.notify_thread is not None:
            self.notify_thread.exit()
        # remove all objects from notify queue
        if self.notify_queue is not None:
            try:
                logger.debug("clearing notify queue (size: %d)",
                             self.notify_queue.qsize())
                while not self.notify_queue.empty():
                    self.notify_queue.get()
            except Exception as e:
                logger.debug("Traceback:\n%s", traceback.format_exc())
                logger.error("failed to execute clear notify queue %s", e)

    def watcher_init(self):
        """ watcher process needs session object for mo sync and notify queue"""
        logger.debug("wf worker init for %s", self.fabric)
        self.notify_queue = Queue(maxsize=NOTIFY_QUEUE_MAX_SIZE)
        logger.debug("starting worker fabric apic session")
        self.session = get_apic_session(self.fabric)
        if self.session is None:
            logger.error("failed to get session object within worker fabric")

        # watcher will also send notifications within a background thread to ensure that
        # any delay in syslog or email does not backup other service events
        self.notify_thread = BackgroundThread(func=self.execute_notify,
                                              name="notify",
                                              count=0,
                                              interval=NOTIFY_INTERVAL)
        self.notify_thread.daemon = True
        self.notify_thread.start()

    def settings_reload(self):
        """ reload settings from db """
        logger.debug("reloading settings for %s", self.fabric)
        self.settings.reload()
        self.init()

    def get_uptime_delta_offset(self, delta=None):
        """ return difference between provided delta and current uptime. If the uptime_delta is 
            less than zero, return 0.  If no delta is provided, then return the uptime.
        """
        uptime = time.time() - self.start_ts
        if delta is None: return uptime
        uptime_delta = delta - uptime
        if uptime_delta > 0: return uptime_delta
        return 0

    def push_event(self, table, key, event, per_node=True):
        # wrapper to push an event to the eptHistory events list. set per_node to False to use the
        # max_endpoint_events rotate length, else the max_per_node_endpoint_events value is used
        if per_node:
            return push_event(
                self.db[table],
                key,
                event,
                rotate=self.settings.max_per_node_endpoint_events)
        else:
            return push_event(self.db[table],
                              key,
                              event,
                              rotate=self.settings.max_endpoint_events)

    def get_learn_type(self, vnid, flags=[]):
        # based on provided vnid and flags, return learn type for endpoint:
        #   loopback - if loopback in flags
        #   psvi - if psvi in flags
        #   svi - represents an external SVI (should be set on all external svis)
        #   overlay if vnid is overlay vnid
        #   external if vnid is in eptVnid table with external set to true
        #   else returns 'epg' (default learn type)
        if "loopback" in flags:
            return "loopback"
        elif "psvi" in flags:
            return "psvi"
        elif "svi" in flags:
            return "external"
        elif vnid == self.settings.overlay_vnid:
            return "overlay"
        ept_vnid = self.cache.get_vnid_name(vnid, return_object=True)
        if ept_vnid is not None and ept_vnid.external:
            return "external"
        return "epg"

    def notification_enabled(self, notify_type):
        # return dict with email address, syslog server, syslog port for notify type. If not
        # enabled, then return None for each field. Set notify_type to 'any_email' or
        # 'any_syslog' to force a check of a particular notification mechanism
        ret = {
            "enabled": False,
            "email_address": None,
            "syslog_server": None,
            "syslog_port": None
        }
        if notify_type == "move":
            attr = ("notify_move_email", "notify_move_syslog")
        elif notify_type == "stale":
            attr = ("notify_stale_email", "notify_stale_syslog")
        elif notify_type == "offsubnet":
            attr = ("notify_offsubnet_email", "notify_offsubnet_syslog")
        elif notify_type == "clear":
            attr = ("notify_clear_email", "notify_clear_syslog")
        elif notify_type == "rapid":
            attr = ("notify_rapid_email", "notify_rapid_syslog")
        elif notify_type == "any_email":
            # return all notification types enabled
            ret["enabled"] = True
            ret["email_address"] = self.email_address
            return ret
        elif notify_type == "any_syslog":
            # return all notification types enabled
            ret["enabled"] = True
            ret["syslog_server"] = self.syslog_server
            ret["syslog_port"] = self.syslog_port
            return ret
        else:
            logger.warn("invalid notification type '%s", notify_type)
            return ret
        if getattr(self.settings, attr[0]):
            ret["enabled"] = True
            ret["email_address"] = self.email_address
        if getattr(self.settings, attr[1]):
            ret["enabled"] = True
            ret["syslog_server"] = self.syslog_server
            ret["syslog_port"] = self.syslog_port
        return ret

    def send_notification(self,
                          notify_type,
                          subject=None,
                          txt=None,
                          bulk=None):
        # send proper notifications for this fabric. set notify_type to None to skip the enable
        # check and force notification. user can set bulk to a list of (subject, txt) tuples to
        # send several notifications at the same time. All notifications must be of the same
        # notify_type.
        success = True
        errmsg = ""
        notify = self.notification_enabled(notify_type)
        if notify["enabled"]:
            if notify["email_address"] is not None:
                emails = []
                if bulk is not None:
                    for (bulk_subject, bulk_txt) in bulk:
                        emails.append({
                            "sender":
                            get_app_config().get("EMAIL_SENDER", None),
                            "receiver":
                            notify["email_address"],
                            "subject":
                            bulk_subject,
                            "msg":
                            bulk_txt,
                        })
                else:
                    emails.append({
                        "sender":
                        get_app_config().get("EMAIL_SENDER", None),
                        "receiver":
                        notify["email_address"],
                        "subject":
                        subject,
                        "msg":
                        txt,
                    })
                # send_emails already supports a list of emails, so simply send all at once
                (success, errmsg) = send_emails(settings=self.settings,
                                                dns_cache=self.dns_cache,
                                                emails=emails)
                if not success:
                    logger.warn("failed to send email: %s", errmsg)
            if notify["syslog_server"] is not None:
                if bulk is not None:
                    for (bulk_subject, bulk_txt) in bulk:
                        syslog(
                            bulk_txt,
                            dns_cache=self.dns_cache,
                            server=notify["syslog_server"],
                            server_port=notify["syslog_port"],
                        )
                else:
                    syslog(
                        txt,
                        dns_cache=self.dns_cache,
                        server=notify["syslog_server"],
                        server_port=notify["syslog_port"],
                    )
            return (success, errmsg)
        else:
            logger.debug("skipping send notification as '%s' is not enabled",
                         notify_type)
            return (False, "notification not enabled")

    def queue_notification(self, notify_type, subject, txt):
        # queue notification that will be sent at next iteration of NOTIFY_INTERVAL
        if self.notify_queue is None:
            logger.error("notify queue not initialized for worker fabric")
            return
        try:
            logger.debug("enqueuing %s notification (queue size %d)",
                         notify_type, self.notify_queue.qsize())
            self.notify_queue.put_nowait(
                (notify_type, subject, txt, time.time()))
        except Full:
            logger.error(
                "failed to enqueue notification, queue is full (size: %s)",
                self.notify_queue.qsize())

    def execute_notify(self):
        # send any notifications sitting in the queue, then log the number of notifications sent
        # and the max queue time. We want to support bulk notifications (mainly for email, to
        # prevent multiple logins on smtp_relay_auth setups) so this function sorts by notify
        # type and executes send_notification with the bulk flag.
        msgs = {}  # indexed by notify type and contains tuple (subject,txt)
        count = 0
        max_queue_time = 0
        while not self.notify_queue.empty():
            (notify_type, subject, txt, q_ts) = self.notify_queue.get()
            count += 1
            q_time = time.time() - q_ts
            if q_time > max_queue_time:
                max_queue_time = q_time
            if notify_type not in msgs:
                msgs[notify_type] = []
            msgs[notify_type].append((subject, txt))
        for notify_type in msgs:
            self.send_notification(notify_type, bulk=msgs[notify_type])
        if count > 0:
            logger.debug("sent %s notifications, max queue time %0.3f sec",
                         count, max_queue_time)
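
The heart of execute_notify() above is draining the queue once per interval and grouping messages by notify type, so that each type goes out in a single bulk send. A hedged, self-contained sketch of that pattern (send_bulk is a hypothetical stand-in for send_notification):

import time
from queue import Queue

notify_queue = Queue()

def send_bulk(notify_type, pairs):
    print(notify_type, "->", pairs)   # stand-in for send_notification(..., bulk=pairs)

def execute_notify():
    msgs, count, max_queue_time = {}, 0, 0.0
    while not notify_queue.empty():
        notify_type, subject, txt, q_ts = notify_queue.get()
        count += 1
        max_queue_time = max(max_queue_time, time.time() - q_ts)
        msgs.setdefault(notify_type, []).append((subject, txt))
    for notify_type, pairs in msgs.items():
        send_bulk(notify_type, pairs)   # one bulk send per notify type
    if count:
        print("sent %d notifications, max queue time %.3f sec"
              % (count, max_queue_time))

notify_queue.put(("move", "ep moved", "details...", time.time()))
notify_queue.put(("move", "ep moved again", "details...", time.time()))
execute_notify()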
Code example #15
class QueueWorker(BaseWorker):
    TIMEOUT_ATTEMPTS = 5
    QUEUE_SIZE = -1  # inf
    END_EVENT = object()
    NAME = 'polyaxon.QueueWorker'

    def __init__(self, timeout=None, queue_size=None):
        super(QueueWorker, self).__init__()
        self._queue = Queue(queue_size or self.QUEUE_SIZE)
        self._timeout = timeout if timeout is not None else settings.TIMEOUT

    def atexit(self):
        with self._lock:
            if not self.is_alive():
                return

            self._queue.put_nowait(self.END_EVENT)

            def timeout_join(timeout, queue):
                end = time() + timeout
                queue.all_tasks_done.acquire()
                try:
                    while queue.unfinished_tasks:
                        current_timeout = end - time()
                        if current_timeout <= 0:
                            # timed out
                            return False

                        queue.all_tasks_done.wait(timeout=current_timeout)

                    return True

                finally:
                    queue.all_tasks_done.release()

            # ensure wait
            timeout = min(settings.MIN_TIMEOUT,
                          self._timeout / self.TIMEOUT_ATTEMPTS)
            if timeout_join(timeout=timeout, queue=self._queue):
                timeout = 0
            else:
                # Queue still has message, try another time
                size = self._queue.qsize()

                if not settings.IN_CLUSTER:
                    print(
                        'Polyaxon %s is attempting to send %i pending messages'
                        % (self.NAME, size))
                    print('Waiting up to {} seconds'.format(self._timeout))
                    if os.name == 'nt':
                        print('Press Ctrl-Break to quit')
                    else:
                        print('Press Ctrl-C to quit')

            sleep(settings.MIN_TIMEOUT)  # Allow tasks to get executed
            while timeout > 0 and not timeout_join(timeout=timeout,
                                                   queue=self._queue):
                timeout = min(timeout + self._timeout / self.TIMEOUT_ATTEMPTS,
                              self._timeout - timeout)

            size = self._queue.qsize()
            if size > 0:
                print(
                    'Polyaxon %s timed out and did not manage to send %i messages'
                    % (self.NAME, size))

            self._thread = None

    def stop(self, timeout=None):
        with self._lock:
            if self._thread:
                self._queue.put_nowait(self.END_EVENT)
                self._thread.join(timeout=timeout)
                self._thread = None
                self._thread_for_pid = None

    def queue(self, callback, *args, **kwargs):
        self.is_running()
        self._queue.put_nowait((callback, args, kwargs))

    def _target(self):
        while True:
            record = self._queue.get()
            try:
                if record is self.END_EVENT:
                    break
                callback, args, kwargs = record
                try:
                    callback(*args, **kwargs)
                except Exception:
                    logger.error('Failed processing job', exc_info=True)
            finally:
                self._queue.task_done()

            sleep(0)
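
The shutdown logic above hinges on pairing every get() with task_done() so that unfinished_tasks, and therefore join(), stays accurate. A minimal sketch of that END_EVENT/task_done contract, with the settings machinery left out:

import threading
from queue import Queue

END_EVENT = object()
q = Queue()

def target():
    while True:
        record = q.get()
        try:
            if record is END_EVENT:
                break
            callback, args, kwargs = record
            callback(*args, **kwargs)
        finally:
            q.task_done()   # keeps unfinished_tasks, and so q.join(), accurate

t = threading.Thread(target=target, daemon=True)
t.start()
q.put_nowait((print, ("hello",), {}))
q.put_nowait(END_EVENT)
q.join()    # returns once both items have been marked done
t.join()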
Code example #16
File: zmqsocket.py Project: pennmem/RAMControl
class SocketServer(object):
    """ZMQ-based socket server for sending and receiving messages from the host
    PC.

    Because of the weird way in which PyEPL handles events, we can't run this as
    its own thread, but instead have to poll for events in the general PyEPL
    machinery. In the future, we should clean up PyEPL entirely so that it does
    not block other threads (amongst other reasons).

    :param zmq.Context ctx:

    """
    def __init__(self, ctx=None):
        self.ctx = ctx or zmq.Context()

        self._handlers = []

        self.sock = self.ctx.socket(zmq.PAIR)
        self._bound = False

        self.poller = zmq.Poller()
        self.poller.register(self.sock, zmq.POLLIN)

        # Outgoing message queue
        self._out_queue = Queue()

        # time of last sent heartbeat message
        self._last_heartbeat = 0.

        # Logging of sent and received messages.
        self.logger = create_logger("network")

    def join(self):
        """Block until all outgoing messages have been processed."""
        self.logger.warning("Joining doesn't work yet; doing nothing...")
        # self._out_queue.join()

    def bind(self, address="tcp://*:8889"):
        """Bind the socket to start listening for connections.

        :param str address: ZMQ address string

        """
        self.sock.bind(address)
        self._bound = True

    def register_handler(self, func):
        """Register a message handler.

        :param callable func: Handler function which takes the message as its
            only argument.

        """
        self.logger.debug("Adding handler: %s", func.__name__)
        self._handlers.append(func)

    def enqueue_message(self, msg):
        """Submit a new outgoing message to the queue."""
        self._out_queue.put_nowait(msg)

    def send(self, msg):
        """Immediately transmit a message to the host PC. It is advisable to not
        call this method directly in most cases, but rather enqueue a message to
        be sent via :meth:`enqueue_message`.

        :param RAMMessage msg: Message to send.

        """
        out = msg.jsonize()
        try:
            self.log_message(msg, incoming=False)
            self.sock.send(out, zmq.NOBLOCK)
        except Exception:
            self.logger.error("Sending failed!")

    def send_heartbeat(self):
        """Convenience method to send a heartbeat message to the host PC."""
        if time.time() - self._last_heartbeat >= 1.0:
            self.send(HeartbeatMessage())
            self._last_heartbeat = time.time()

    def log_message(self, message, incoming=True):
        """Log a message to the log file."""
        if not incoming:
            message = message.to_dict()

        message["in_or_out"] = "in" if incoming else "out"
        self.logger.info("%s", json.dumps(message))

    def handle_incoming(self):
        events = self.poller.poll(1)
        if self.sock in dict(events):
            try:
                msg = self.sock.recv_json()
                self.log_message(msg, incoming=True)
            except Exception:
                self.logger.error("Unable to decode JSON.", exc_info=True)
                return

            for handler in self._handlers:
                try:
                    handler(msg)
                except Exception:
                    self.logger.error("Error handling message", exc_info=True)
                    continue

    def handle_outgoing(self):
        try:
            while not self._out_queue.empty():
                msg = self._out_queue.get_nowait()
                self.send(msg)
                self._out_queue.task_done()  # so we can join the queue elsewhere
        except Exception:
            self.logger.error("Error in outgoing message processing",
                              exc_info=True)

    def update(self):
        """Call periodically to check for incoming messages and/or send messages
        in the outgoing queue.

        """
        self.handle_incoming()
        self.handle_outgoing()
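
A hedged usage sketch, assuming pyzmq and this module's create_logger helper are importable; the PAIR peer here is only a stand-in for the host PC so that update() has traffic to process:

import zmq

ctx = zmq.Context()
peer = ctx.socket(zmq.PAIR)            # stand-in for the host PC
peer.connect("tcp://localhost:8889")

server = SocketServer(ctx)
server.bind("tcp://*:8889")
server.register_handler(lambda msg: print("received:", msg))

peer.send_json({"type": "SYNC"})       # give handle_incoming() something to read
for _ in range(100):                   # the PyEPL-style polling loop
    server.update()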
Code example #17
File: multi_client.py Project: xunbian/Nbdler
class MultiClientConsole(AbstractConsole):
    def __init__(self, file, url, buffsize, block_size, maxthread,
                 exception_callback):
        self._blocks = []
        self._block_size = block_size

        self._mxthread = maxthread
        self._buffsize = buffsize

        self.__total = int(ceil(file.getsize() / self._block_size))

        self.file = file
        self._url = url

        self._tpm = ThreadCollector()

        self._client_callback_queue = Queue()
        self.__exception_signal = exception_callback

        self.__buff_counter = 0

        self._buff_lock = Lock()
        self.runflag = False

        self._acum_time = AccumulatedTime(0)
        self._inst_maker = InstSpeedMaker()
        self._saver = None

        self._external_callback_queue = None

    def insert(self, content, progress, abs_grid=None, rel_grid=None):
        block = Block(content,
                      progress,
                      self._block_size,
                      abs_grid=abs_grid,
                      rel_grid=rel_grid)

        for i, v in enumerate(self._blocks):
            if v.begin > block.begin:
                self._blocks.insert(i, block)
                break
        else:
            self._blocks.append(block)

        return block

    def locate(self, block_index):
        c = 0
        for i in self._blocks:
            c += i.length
            if block_index < c:
                return i

    def getfullmap(self):
        for b in self._blocks:
            for i in b.abs_grid:
                yield i

    def __iter__(self):
        return self.getfullmap()

    def __getitem__(self, item):
        if type(item) is not int:
            raise ValueError()

        if item >= self.__total:
            raise IndexError()

        c = 0
        for i in self._blocks:
            c += i.length
            if item < c:
                return i.grid[item - c + i.length]

        raise IndexError()

    def prepare(self):
        self.file.open()

    def run(self):
        """ Run download task. """
        if not self._blocks:
            raise RuntimeError(
                'cannot find any download block in the console.')
        self.runflag = True
        self._acum_time.start()
        self._inst_maker.start(self.get_go_inc())
        for block in self._blocks:
            block.clear_signal()
            self._tpm.put(block.handler,
                          GROUP_CLIENT,
                          args=(self._client_callback_queue, ),
                          owner=block)
        self._tpm.start_group(GROUP_CLIENT)

        self._tpm.put(self._client_callback_handle_thread,
                      GROUP_CONTROLLER).start()
        self._tpm.put(self._inst_speed_capture_thread, GROUP_INSTSPEED).start()

        if self._mxthread > self.get_client_cnt():
            self._make_slice_request()

    def get_block_size(self):
        """ Return console's unit block size. """
        return self._block_size

    def get_go_inc(self):
        """ Return byte size downloaded. """
        inc = 0
        for v in self._blocks:
            inc += v.get_go_inc()
        return inc

    def pause(self, block=True, timeout=None):
        """ Pause downloading. block until finish pausing only when block == True. """
        self.runflag = False
        self._client_callback_queue.put_nowait(SIGNAL_TASK_PAUSE())
        if block:
            self._tpm.join(timeout=timeout)

    stop = pause

    def getavgspeed(self):
        """ Return average download speed. """
        return self.get_go_inc() / (self._acum_time.getinctime()
                                    or float('inf'))

    def getinstspeed(self):
        """ Return instant download speed. """
        return self._inst_maker.getspeed()

    def _inst_speed_capture_thread(self):
        while self.runflag:
            self._inst_maker.capture(self.get_go_inc())
            freq = INST_SPEED_REFRESH_FREQUENCY
            if freq > 0:
                sleep(1 / freq)
            else:
                break

    def get_time_left(self):
        """ Return remaining time to finish the download task. """
        speed = self.getavgspeed()
        return self.get_byte_left() / speed if speed else float('inf')

    def get_byte_left(self):
        """ Return remaining byte to finish the download task. """
        return self.file.getsize() - self.get_go_inc()

    def get_online_cnt(self):
        """ Return the number of running client-threads. """
        return len(self._tpm.get_group(GROUP_CLIENT))

    def get_client_cnt(self):
        """ Return the number of running clients. """
        acum = 0
        for v in self._blocks:
            if not v.is_go_finished():
                acum += 1
        return acum

    def is_go_finished(self):
        """ Return True if download finished (finish writing to file is not necessary). """
        for v in self._blocks:
            if not v.is_go_finished():
                return False
        return self._check_complete()

    def is_finished(self):
        """ Return True if download task finished. """
        for v in self._blocks:
            if not v.is_finished():
                return False
        return self._check_complete()

    def _client_callback_handle_thread(self):
        """ A thread handler about callback signal. """
        while True:
            signal = self._client_callback_queue.get()
            if signal.id == ID_TASK_BUFF:
                self.__buffer_signal_handler(signal.content)
            elif signal.id == ID_TASK_SLICE:
                self.__slice_feedback_handler(signal.content)
            elif signal.id == ID_THREAD_END:
                self.__client_thread_end_handler(signal.content)
            elif signal.id == ID_URL_STATUS:
                self.__url_status_handler(signal.content)
            elif signal.id == ID_TASK_PAUSE:
                self.__pause_handler()
            elif signal.id == ID_TASK_FINISH:
                self.__finish_handler()
            elif signal.id == ID_TASK_STOP:
                self.__stop_handler()
            elif signal.id == ID_CALLBACK_END:
                break

            self._client_callback_queue.task_done()

        self._client_callback_queue.task_done()

    def _buffer_release_thread(self):
        """ A thread handling writing buffer to file. """
        with self._buff_lock:
            for v in self._blocks:
                v.release_buffer(self.file)
            if self._saver:
                self._saver.dump()
        gc.collect()

    def _find_block_from_client(self, client, default=None):
        """ Find the block that contains the given client. """
        for v in self._blocks:
            if client in v:
                return v
        return default

    def _make_slice_request(self):
        """ Make a slice request. """
        maxblock = sorted(self._blocks, key=lambda i: i.margin,
                          reverse=True)[0]
        margin_len = maxblock.margin
        put_begin = maxblock.begin + (maxblock.length - margin_len) + int(
            ceil(margin_len / 2))
        put_end = maxblock.end
        signal = SIGNAL_TASK_SLICE(
            (put_begin * self._block_size, put_end * self._block_size))
        maxblock.send_signal(signal)

    def _check_complete(self):
        """ Return True if total blocks length completely match. """
        acum_len = 0
        for v in self._blocks:
            v.calibration()
            acum_len += v.length
        return self.__total == acum_len

    def __client_thread_end_handler(self, client):
        """ Callback handler when signal.id == ID_THREAD_END:
                <CLIENT THREAD END OF LIFE>
        """
        block = self._find_block_from_client(client)
        srcwrapper = self._url.getwrapper(block.getsource())
        if self.runflag:
            if block.is_go_finished():
                if self._mxthread > self.get_client_cnt():
                    self._make_slice_request()
                srcwrapper.disuse()

            else:
                self._tpm.put(client.run,
                              GROUP_CLIENT,
                              args=(self._client_callback_queue, ),
                              owner=block).start()

        if self.is_go_finished():
            self._buffer_release_thread()
            self._finish_task()

    def _finish_task(self):
        self._client_callback_queue.put_nowait(SIGNAL_TASK_FINISH())

        if self._external_callback_queue:
            self._external_callback_queue.queue.put_nowait(
                SIGNAL_TASK_FINISH(
                    Signal(id=self._external_callback_queue.id)))

    def _failed_task(self, exception):
        self.__exception_signal.put_nowait(SIGNAL_EXCEPTION(exception))

        if self._external_callback_queue:
            self._external_callback_queue.queue.put_nowait(
                SIGNAL_EXCEPTION(
                    Signal(id=self._external_callback_queue.id,
                           content=exception)))

    def __stop_handler(self):
        self._buffer_release_thread()
        self.file.close()
        self._acum_time.stop()
        self._inst_maker.stop()
        self.runflag = False
        self._client_callback_queue.put_nowait(SIGNAL_CALLBACK_END())
        self.__exception_signal.put_nowait(SIGNAL_TASK_STOP())
        if self._external_callback_queue:
            self._external_callback_queue.queue.put_nowait(
                SIGNAL_TASK_STOP(Signal(id=self._external_callback_queue.id)))

    def __finish_handler(self):
        self._client_callback_queue.put_nowait(SIGNAL_TASK_STOP())

    def __slice_feedback_handler(self, slice):
        """ Callback handler when signal.id == ID_SLICE:
                <SLICE FEEDBACK>
        """
        if slice.range:
            srcwrapper = self._url.get_min_avl_used()
            if not srcwrapper:
                srcwrapper = self._url.get_min_used()

            prog = Progress(slice.range)
            client = slice.client(srcwrapper.get(), prog)
            block = self.insert(client, prog)
            srcwrapper.use()
            if self.runflag:
                self._tpm.put(block.handler,
                              GROUP_CLIENT,
                              args=(self._client_callback_queue, ),
                              owner=block).start()

        if self._mxthread > self.get_client_cnt():
            self._make_slice_request()

    def __buffer_signal_handler(self, byte):
        """ Callback handler when signal.id == ID_BUFF:
                <BUFFER COUNTER>
        """
        self._url.clear_counter()
        self.__buff_counter += byte
        if self.__buff_counter >= self._buffsize:
            self.__buff_counter = 0
            self._tpm.put(self._buffer_release_thread, GROUP_RELEASE).start()

    def __url_status_handler(self, url_status):
        """ Callback handler when signal.id == ID_URL_STATUS:
                <URL SIGNAL>
        """
        client = url_status.content.client
        exception = url_status.content.exception
        source = client.getsource()
        srcwrapper = self._url.getwrapper(source)
        block = self._find_block_from_client(client)
        try:
            if url_status.id == ID_NORMAL:
                srcwrapper.clear_counter()
            elif url_status.id == ID_TIMEOUT:
                srcwrapper.timeout()
            elif url_status.id == ID_CRASH:
                srcwrapper.crash()
            elif url_status.id == ID_UNKNOWN:
                srcwrapper.unknown()
            elif url_status.id == ID_GAIERROR:
                srcwrapper.network_down()
        except (URLCrash, URLTimeout, URLUnknownError,
                NetworkDisconnected) as e:

            srcwrapper.disuse()
            nextwrapper = self._url.get_min_avl_used()
            if not nextwrapper:
                nextwrapper = self._url.get_next(srcwrapper.source.id)

                if self._url.is_all_crashed():
                    self._failed_task(e)
                else:
                    block.send_signal(SIGNAL_WAIT(1))

            nextwrapper.use_anyway()

            block.send_signal(SIGNAL_SWITCH(nextwrapper.get()))

    def __pause_handler(self):
        """ Callback handler when signal.id == ID_PAUSE:
                <PAUSE SIGNAL>
        """
        for thread in self._tpm.get_group(GROUP_CLIENT):
            thread.owner.send_signal(SIGNAL_TASK_PAUSE())

        self._tpm.wait(GROUP_SLICER)
        self._tpm.wait(GROUP_CLIENT)
        self._tpm.wait(GROUP_RELEASE)

        self._client_callback_queue.put_nowait(SIGNAL_TASK_STOP())

        if self._external_callback_queue:
            self._external_callback_queue.queue.put_nowait(
                SIGNAL_TASK_PAUSE(Signal(id=self._external_callback_queue.id)))

    def dump_data(self):
        return ConsoleDumpedData(block_data=list(
            [tuple(b.dump_data()) for b in self._blocks]),
                                 acum_time=self._acum_time.getinctime())

    def load(self, dumped_data):
        data = ConsoleDumpedData(*dumped_data)
        self._acum_time = AccumulatedTime(data.acum_time)

        for i in data.block_data:
            b = BlockDumpedData(*i)
            p = ProgressDumpedData(*b.progress)
            prog = Progress(p.range, p.go_inc, p.done_inc)
            srcwrapper = self._url.get(b.url_id)
            source = srcwrapper.get()
            client = build_client(source, prog)
            srcwrapper.use()
            self.insert(client,
                        prog,
                        rel_grid=list([GridCell(*i) for i in b.rel_grid]))

    def install_saver(self, saver):
        self._saver = saver

    def install_external_callback_queue(self, signal_queue):
        self._external_callback_queue = signal_queue

    def join(self, timeout=None):
        self._tpm.join(timeout)

    def getBuffCnter(self):
        return self.__buff_counter
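
All client threads funnel their signals into _client_callback_queue and a single controller thread dispatches on signal.id. A stripped-down sketch of that dispatch contract; the Signal namedtuple and the ids below are illustrative stand-ins:

from collections import namedtuple
from queue import Queue

Signal = namedtuple("Signal", "id content")
ID_TASK_BUFF, ID_CALLBACK_END = "buff", "end"

q = Queue()
q.put(Signal(ID_TASK_BUFF, 4096))
q.put(Signal(ID_CALLBACK_END, None))

while True:
    signal = q.get()
    try:
        if signal.id == ID_TASK_BUFF:
            print("buffered", signal.content, "bytes")
        elif signal.id == ID_CALLBACK_END:
            break
    finally:
        q.task_done()   # pair every get() with task_done() so q.join() works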
Code example #18
File: single_client.py Project: xunbian/Nbdler
class SingleClientConsole(AbstractConsole):
    def __init__(self, file, url, buffsize, block_size, maxthread,
                 exception_callback):
        self._block_size = block_size

        self._buffsize = buffsize
        self._block = None
        self.file = file
        self._url = url
        self._tpm = ThreadCollector()
        self._client_callback_queue = Queue()
        self.__exception_signal = exception_callback

        self.__buff_counter = 0
        self._buff_lock = Lock()
        self.runflag = False

        self._acum_time = AccumulatedTime(0)
        self._inst_maker = InstSpeedMaker()
        self._saver = None

        self.__callback_queue = None

    def insert(self, client, progress):
        if self._block:
            raise RuntimeError(
                'cannot insert more than one block into single client console.'
            )
        self._block = Block(client, progress, self._block_size)

    def prepare(self):
        self.file.open()

    def run(self):
        self.runflag = True
        self._block.clear()
        self._acum_time.start()
        self._inst_maker.start(self.get_go_inc())
        self._tpm.put(self._block.handler,
                      GROUP_CLIENT,
                      args=(self._client_callback_queue, ),
                      owner=self._block).start()
        self._tpm.put(self._client_callback_handle_thread,
                      GROUP_CONTROLLER).start()
        self._tpm.put(self._inst_speed_capture_thread, GROUP_INSTSPEED).start()

    def __stop_handler(self):
        self.file.close()
        self._acum_time.stop()
        self._inst_maker.stop()
        self.runflag = False
        self._client_callback_queue.put_nowait(SIGNAL_CALLBACK_END())
        self.__exception_signal.put_nowait(SIGNAL_TASK_STOP())
        if self.__callback_queue:
            self.__callback_queue.queue.put_nowait(
                SIGNAL_TASK_STOP(Signal(id=self.__callback_queue.id)))

    def pause(self, block=True, timeout=None):
        self.runflag = False
        self._client_callback_queue.put_nowait(SIGNAL_TASK_PAUSE())
        if block:
            self._tpm.wait(GROUP_CONTROLLER, timeout=timeout)

    def is_finish_go(self):
        return self._block.is_go_finished()

    def get_byte_left(self):
        if self.is_finish_go():
            return 0
        else:
            return float('inf')

    def get_time_left(self):
        if self.is_finish_go():
            return 0
        else:
            return float('inf')

    def getavgspeed(self):
        return self.get_go_inc() / (self._acum_time.getinctime()
                                    or float('inf'))

    def get_go_inc(self):
        return self._block.get_go_inc()

    def getinstspeed(self):
        return self._inst_maker.getspeed()

    def _inst_speed_capture_thread(self):
        while self.runflag:
            self._inst_maker.capture(self.get_go_inc())
            freq = INST_SPEED_REFRESH_FREQUENCY
            if freq > 0:
                sleep(1 / freq)
            else:
                break

    def _client_callback_handle_thread(self):
        while True:
            signal = self._client_callback_queue.get()
            if signal.id == ID_TASK_BUFF:
                self.__buffer_signal_handler(signal.content)
            elif signal.id == ID_THREAD_END:
                self.__client_thread_end_handler(signal.content)
            elif signal.id == ID_URL_STATUS:
                self.__url_status_handler(signal.content)
            elif signal.id == ID_TASK_PAUSE:
                self.__pause_handler()
            elif signal.id == ID_TASK_FINISH:
                self.__stop_handler()
            elif signal.id == ID_TASK_STOP:
                break

            self._client_callback_queue.task_done()

        self._client_callback_queue.task_done()

    def is_finished(self):
        return self._block.is_finished()

    def _finish_task(self):
        self._client_callback_queue.put_nowait(SIGNAL_TASK_FINISH())

        if self.__callback_queue:
            self.__callback_queue.queue.put_nowait(
                SIGNAL_TASK_FINISH(Signal(id=self.__callback_queue.id)))

    def __pause_handler(self):
        """ Callback handler when signal.id == ID_PAUSE:
                <PAUSE SIGNAL>
        """
        for thread in self._tpm.get_group(GROUP_CLIENT):
            thread.owner.send_signal(SIGNAL_TASK_PAUSE())

        self._tpm.wait(GROUP_CLIENT)
        self._tpm.wait(GROUP_RELEASE)

        self._buffer_release_thread()
        self._client_callback_queue.put_nowait(SIGNAL_TASK_STOP())

        if self.__callback_queue:
            self.__callback_queue.queue.put_nowait(
                SIGNAL_TASK_PAUSE(Signal(id=self.__callback_queue.id)))

    def __url_status_handler(self, url_status):
        """ Callback handler when signal.id == ID_URL_STATUS:
                <URL SIGNAL>
        """
        client = url_status.content.client
        exception = url_status.content.exception
        source = client.getsource()
        srcwrapper = self._url.getwrapper(source)
        try:
            if url_status.id == ID_NORMAL:
                srcwrapper.clear_counter()
            elif url_status.id == ID_TIMEOUT:
                srcwrapper.timeout()
            elif url_status.id == ID_CRASH:
                srcwrapper.crash()
            elif url_status.id == ID_UNKNOWN:
                srcwrapper.unknown()
            elif url_status.id == ID_GAIERROR:
                srcwrapper.network_down()
        except (URLCrash, URLTimeout, URLUnknownError,
                NetworkDisconnected) as e:
            self.__exception_signal.put_nowait(SIGNAL_EXCEPTION(e))

    def _buffer_release_thread(self):
        """ A thread handler about releasing buffer. """
        with self._buff_lock:
            self._block.release_buffer(self.file)
            if self._saver:
                self._saver.dump()
        gc.collect()

    def __buffer_signal_handler(self, byte):
        """ Callback handler when signal.id == ID_BUFF:
                <BUFFER COUNTER>
        """
        self._url.clear_counter()
        self.__buff_counter += byte
        if self.__buff_counter >= self._buffsize:
            self.__buff_counter = 0
            self._tpm.put(self._buffer_release_thread, GROUP_RELEASE).start()

    def __client_thread_end_handler(self, client):
        """ Callback handler when signal.id == ID_THREAD_END:
                <CLIENT THREAD END OF LIFE>
        """
        if self.runflag:
            if self._block.is_go_finished():
                srcwrapper = self._url.getwrapper(self._block.getsource())
                srcwrapper.disuse()
            else:
                self._block.clear()
                self._tpm.put(client.run,
                              GROUP_CLIENT,
                              args=(self._client_callback_queue, ),
                              owner=self._block).start()

        if self.is_finish_go():
            self._buffer_release_thread()
            self._finish_task()

    def install_saver(self, saver):
        self._saver = saver

    def dump_data(self):
        return ConsoleDumpedData(block_data=tuple(self._block.dump_data()),
                                 acum_time=self._acum_time.getinctime())

    def load(self, dumped_data):
        data = ConsoleDumpedData(*dumped_data)
        self._acum_time = AccumulatedTime(data.acum_time)

        for i in data.block_data:
            b = BlockDumpedData(*i)
            prog = Progress((0, ))
            srcwrapper = self._url.get(b.url_id)
            source = srcwrapper.get()
            client = build_client(source, prog)
            srcwrapper.use()
            self.insert(client, prog)

    def get_block_size(self):
        return self._block_size

    def join(self, timeout=None):
        self._tpm.join(timeout)

    def install_external_callback_queue(self, handle):
        self.__callback_queue = handle

    def getBuffCnter(self):
        return self.__buff_counter
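
Note how getavgspeed() in both consoles guards against division by zero with "or float('inf')": before any time has accumulated, the divisor becomes infinity and the speed reads as 0.0 instead of raising. A two-line illustration:

def avg_speed(bytes_done, elapsed):
    # elapsed == 0 falls through to float('inf'), so the result is 0.0
    return bytes_done / (elapsed or float('inf'))

print(avg_speed(1024, 0))    # 0.0, no ZeroDivisionError
print(avg_speed(1024, 2.0))  # 512.0 bytes/sec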
Code example #19
class TCPHandler(logging.Handler):
    '''
    Python logging handler for sending JSON formatted messages over
    TCP, optionally wrapping the connection with TLSv1
    '''
    def __init__(self, host, port, ssl_ca_file=None):
        '''
        Instantiate a TCPHandler with the intent of connecting to the
        given host (string) and port (int) with or without using SSL/TLSv1
        '''
        logging.Handler.__init__(self)
        self.host = host
        self.port = port
        self.ssl_ca_file = ssl_ca_file
        self.sock = None
        self.queue = Queue(LOG_QUEUE_SIZE)
        self.connect_wait = BACKOFF_INITIAL
        self.raiseExceptions = 0

        self.hostname = socket.gethostname()
        if self.hostname.find('.') != -1:
            self.hostname = self.hostname.split('.', 1)[0]

        self.sender = threading.Thread(target=self.run)
        self.sender.daemon = True
        self.sender.start()

    def connect(self):
        '''
        Create a connection with the server, sleeping for some
        period of time if connection errors have occurred recently.
        '''
        self.sock = socket.socket()
        if self.ssl_ca_file:
            self.sock = ssl.wrap_socket(self.sock,
                                        ssl_version=ssl.PROTOCOL_TLSv1,
                                        cert_reqs=ssl.CERT_REQUIRED,
                                        ca_certs=self.ssl_ca_file)

        INTERNAL_LOG.debug('Connecting (backoff: %.03f)' % self.connect_wait)
        time.sleep(self.connect_wait)
        self.sock.connect((self.host, self.port))

    def jsonify(self, record):
        '''
        Translate a LogRecord instance into a json_event
        '''
        timestamp = datetime.utcfromtimestamp(record.created)
        timestamp = timestamp.isoformat()

        fields = {
            'level': record.levelname,
            'filename': record.pathname,
            'lineno': record.lineno,
            'method': record.funcName,
        }
        if record.exc_info:
            fields['exception'] = str(record.exc_info)
            fields['traceback'] = format_exc(record.exc_info)

        log = {
            '@source_host': self.hostname,
            '@timestamp': timestamp,
            '@tags': [record.name],
            '@message': record.getMessage(),
            '@fields': fields,
        }
        return json.dumps(log)

    def emit(self, record):
        '''
        Send a LogRecord object formatted as json_event via a
        queue and worker thread.
        '''
        self.queue.put_nowait(record)

    def run(self):
        '''
        Main loop of the logger thread. All network I/O and exception handling
        originates here. Strings are consumed from self.queue and sent to
        self.sock, creating a new connection if necessary.

        If any exceptions are caught, the message is put() back on the queue
        and the exception is allowed to propagate up through
        logging.Handler.handleError(), potentially causing this thread to abort.
        '''
        INTERNAL_LOG.debug('Log I/O thread started')
        while True:
            record = self.queue.get()
            if record is None:
                break

            jsonrecord = self.jsonify(record)
            jsonrecord = '%s\n' % jsonrecord

            try:
                if self.sock is None:
                    self.connect()
                self.send(jsonrecord)
            except Exception:
                # This exception will be silently ignored and the message
                # requeued unless self.raiseExceptions=1
                self.queue.put(record)
                self.handleError(record)
            self.queue.task_done()
        INTERNAL_LOG.debug('Log I/O thread exited cleanly')

    def send(self, data):
        '''
        Keep calling SSLSocket.write until the entire message has been sent
        '''
        while len(data) > 0:
            if self.ssl_ca_file:
                sent = self.sock.write(data)
            else:
                sent = self.sock.send(data)
            data = data[sent:]
        self.connect_wait = BACKOFF_INITIAL

    def handleError(self, record):
        '''
        If an error occurs trying to send the log message, close the connection
        and delegate the exception handling to the superclass' handleError,
        which raises the exception (potentially killing the log thread) unless
        self.raiseExceptions is False.
        http://hg.python.org/cpython/file/e64d4518b23c/Lib/logging/__init__.py#l797
        '''
        INTERNAL_LOG.exception('Unable to send log')
        self.cleanup()
        self.connect_wait *= BACKOFF_MULTIPLE
        logging.Handler.handleError(self, record)

    def cleanup(self):
        '''
        If the socket to the server is still open, close it. Otherwise, do
        nothing.
        '''
        if self.sock:
            INTERNAL_LOG.info('Closing socket')
            self.sock.close()
            self.sock = None

    def close(self):
        '''
        Send a sentinel None object to the worker thread, telling it to exit
        and disconnect from the server.
        '''
        self.queue.put(None)
        self.cleanup()
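
A hedged usage sketch; the host name and port are illustrative, and it assumes the module-level constants (LOG_QUEUE_SIZE, BACKOFF_INITIAL, BACKOFF_MULTIPLE, INTERNAL_LOG) are defined and a JSON log collector is listening on the other end:

import logging

log = logging.getLogger("app")
log.setLevel(logging.INFO)

handler = TCPHandler("logs.example.com", 5959)   # plaintext TCP
# handler = TCPHandler("logs.example.com", 5960, ssl_ca_file="ca.pem")  # TLSv1
log.addHandler(handler)

log.info("service started")   # queued; the sender thread JSON-encodes and ships it
handler.close()               # the None sentinel stops the I/O thread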
Code example #20
class TilestorWriter:
    __instance = None

    def __init__(self):
        if TilestorWriter.__instance is None:
            self.queue = Queue(maxsize=QUEUE_MAX_SIZE)
            self.cstart = None

            self._worker = Thread(target=self._job)
            self._worker.daemon = True
            self._worker.start()

    @classmethod
    def getInstance(cls):
        if cls.__instance is None:
            cls.__instance = TilestorWriter()
        return cls.__instance

    def put(self, payload, timeout):
        try:
            if timeout is None:
                self.queue.put_nowait(payload)
            else:
                self.queue.put(payload, timeout=timeout)
        except Full:
            raise TileWriterQueueFullException(
                "Tile writer queue is full at maxsize {}.".format(
                    self.queue.maxsize))

    def _job(self):
        self._shutdown = False
        atexit.register(self.wait_for_shutdown)

        data = None
        while True:
            self.cstart = None

            if data is None:
                try:
                    # When the thread is shutting down, use a minimal timeout;
                    # otherwise use a bounded timeout so that a shutdown
                    # request is still noticed promptly.
                    get_timeout = 0.001 if self._shutdown else min(
                        SHUTDOWN_TIMEOUT / 5, 2)
                    data = self.queue.get(True, get_timeout)
                except Empty:
                    if self._shutdown:
                        _logger.debug(
                            "Tile cache writer queue is empty now. Exiting!")
                        break
                    else:
                        continue

            db_path = data['db_path']

            self.cstart = ptime = time()

            tiles_written = 0
            time_taken = 0.0

            answers = []

            # Tile cache writer may fail sometimes, for example in case of a
            # database connection problem. So we just skip a tile with an error
            # and log the exception.
            try:

                with transaction.manager:
                    conn = DBSession.connection()
                    tilestor = get_tile_db(db_path)

                    while data is not None and data['db_path'] == db_path:
                        z, x, y = data['tile']
                        tstamp = int((datetime.utcnow() -
                                      TIMESTAMP_EPOCH).total_seconds())

                        img = data['img']
                        if img is not None and img.mode != 'RGBA':
                            img = img.convert('RGBA')

                        colortuple = imgcolor(img)
                        color = pack_color(
                            colortuple) if colortuple is not None else None

                        self._write_tile_meta(
                            conn, data['uuid'],
                            dict(z=z, x=x, y=y, color=color, tstamp=tstamp))

                        if color is None:
                            buf = BytesIO()
                            img.save(buf, format='PNG', compress_level=3)
                            value = buf.getvalue()

                            self._write_tile_data(tilestor, z, x, y, tstamp,
                                                  value)

                        if 'answer_queue' in data:
                            answers.append(data['answer_queue'])

                        tiles_written += 1

                        ctime = time()
                        time_taken += ctime - ptime

                        if tiles_written >= BATCH_MAX_TILES:
                            # Break the batch
                            data = None
                        else:
                            # Try to get next tile for the batch. Or break
                            # the batch if there is no tiles left.
                            if time_taken < BATCH_DEADLINE:
                                try:
                                    data = self.queue.get(
                                        timeout=(BATCH_DEADLINE - time_taken))
                                except Empty:
                                    data = None
                            else:
                                data = None

                        # Do not account queue block time
                        ptime = time()

                    # Force zope session management to commit changes
                    mark_changed(DBSession())
                    tilestor.commit()

                    time_taken += time() - ptime
                    _logger.debug(
                        "%d tiles were written in %0.3f seconds (%0.1f per "
                        "second, qsize = %d)", tiles_written, time_taken,
                        tiles_written / time_taken, self.queue.qsize())

                # Report about success only after transaction commit
                for a in answers:
                    a.put_nowait(None)

            except Exception as exc:
                _logger.exception("Uncaught exception in tile writer: %s",
                                  exc)

                data = None
                self.cstart = None
                tilestor.rollback()

    def _write_tile_meta(self, conn, table_uuid, row):
        result = conn.execute(
            db.sql.text('SELECT true FROM tile_cache."{}" '
                        'WHERE z = :z AND x = :x AND y = :y '
                        'LIMIT 1 FOR UPDATE'.format(table_uuid)), **row)

        if result.returns_rows:
            conn.execute(
                db.sql.text('DELETE FROM tile_cache."{0}" '
                            'WHERE z = :z AND x = :x AND y = :y '
                            ''.format(table_uuid)), **row)

        conn.execute(
            db.sql.text(
                'INSERT INTO tile_cache."{0}" (z, x, y, color, tstamp) '
                'VALUES (:z, :x, :y, :color, :tstamp)'.format(table_uuid)),
            **row)

    def _write_tile_data(self, tilestor, z, x, y, tstamp, value):
        tilestor.execute("DELETE FROM tile WHERE z = ? AND x = ? AND y = ?",
                         (z, x, y))

        try:
            tilestor.execute("INSERT INTO tile VALUES (?, ?, ?, ?, ?)",
                             (z, x, y, tstamp, value))
        except sqlite3.IntegrityError:
            # NOTE: A race condition with another process may occur here.
            # TODO: ON CONFLICT DO ... in SQLite >= 3.24.0 (python 3)
            pass

    def wait_for_shutdown(self, timeout=SHUTDOWN_TIMEOUT):
        if not self._worker.is_alive():
            return True

        _logger.debug(
            "Waiting for shutdown of tile cache writer for %d seconds (" +
            "qsize = %d)...", timeout, self.queue.qsize())

        self._shutdown = True
        self._worker.join(timeout)

        if self._worker.is_alive():
            _logger.warn(
                "Tile cache writer is still running. It'll be killed!")
            return False
        else:
            _logger.debug("Tile cache writer has successfully shut down.")
            return True
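
A hedged usage sketch of the singleton writer. The payload keys mirror what _job() reads above (db_path, tile, img, uuid, and an optional answer_queue); the path and uuid values are illustrative only, and the surrounding database machinery (DBSession, get_tile_db) must be configured for the write to actually commit:

from queue import Queue
from PIL import Image

writer = TilestorWriter.getInstance()

answer = Queue()
writer.put(dict(
    db_path="/tmp/tiles.sqlite",        # illustrative path
    tile=(3, 2, 5),                     # z, x, y
    img=Image.new("RGBA", (256, 256)),
    uuid="tile_cache_table_uuid",       # placeholder table uuid
    answer_queue=answer,                # optional: request a commit receipt
), timeout=5.0)

answer.get()                            # blocks until the batch is committed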
Code example #21
class JsonRpcServer(object):
    log = logging.getLogger("jsonrpc.JsonRpcServer")
    """
    Creates new JsonrRpcServer by providing a bridge, timeout in seconds
    which defining how often we should log connections stats and thread
    factory.
    """
    def __init__(self, bridge, timeout, cif, threadFactory=None):
        self._bridge = bridge
        self._cif = cif
        self._workQueue = Queue()
        self._threadFactory = threadFactory
        self._timeout = timeout
        self._next_report = monotonic_time() + self._timeout
        self._counter = 0

    def queueRequest(self, req):
        self._workQueue.put_nowait(req)

    """
    Aggregates number of requests received by vdsm. Each request from
    a batch is added separately. After time defined by timeout we log
    number of requests.
    """

    def _attempt_log_stats(self):
        self._counter += 1
        if monotonic_time() > self._next_report:
            self.log.info('%s requests processed during %s seconds',
                          self._counter, self._timeout)
            self._next_report += self._timeout
            self._counter = 0

    def _serveRequest(self, ctx, req):
        start_time = monotonic_time()
        response = self._handle_request(req, ctx.server_address)
        error = getattr(response, "error", None)
        if error is None:
            response_log = "succeeded"
        else:
            response_log = "failed (error %s)" % (error.code, )
        self.log.info("RPC call %s %s in %.2f seconds", req.method,
                      response_log,
                      monotonic_time() - start_time)
        if response is not None:
            ctx.requestDone(response)

    def _handle_request(self, req, server_address):
        self._attempt_log_stats()
        logLevel = logging.DEBUG

        # VDSM should never respond to any request before all information about
        # running VMs is recovered, see https://bugzilla.redhat.com/1339291
        if not self._cif.ready:
            self.log.info("In recovery, ignoring '%s' in bridge with %s",
                          req.method, req.params)
            return JsonRpcResponse(None, exception.RecoveryInProgress(),
                                   req.id)

        self.log.log(logLevel, "Calling '%s' in bridge with %s", req.method,
                     req.params)
        try:
            method = self._bridge.dispatch(req.method)
        except JsonRpcMethodNotFoundError as e:
            if req.isNotification():
                return None

            return JsonRpcResponse(None, e, req.id)

        try:
            params = req.params
            self._bridge.register_server_address(server_address)
            if isinstance(req.params, list):
                res = method(*params)
            else:
                res = method(**params)
            self._bridge.unregister_server_address()
        except JsonRpcError as e:
            return JsonRpcResponse(None, e, req.id)
        except Exception as e:
            self.log.exception("Internal server error")
            return JsonRpcResponse(None, JsonRpcInternalError(str(e)), req.id)
        else:
            res = True if res is None else res
            self.log.log(logLevel, "Return '%s' in bridge with %s", req.method,
                         res)
            if isinstance(res, Suppressed):
                res = res.value
            return JsonRpcResponse(res, None, req.id)

    @traceback(on=log.name)
    def serve_requests(self):
        while True:
            obj = self._workQueue.get()
            if obj is None:
                break

            client, server_address, msg = obj
            self._parseMessage(client, server_address, msg)

    def _parseMessage(self, client, server_address, msg):
        ctx = _JsonRpcServeRequestContext(client, server_address)

        try:
            rawRequests = json.loads(msg)
        except Exception:
            ctx.addResponse(JsonRpcResponse(None, JsonRpcParseError(), None))
            ctx.sendReply()
            return

        if isinstance(rawRequests, list):
            # Empty batch request
            if len(rawRequests) == 0:
                ctx.addResponse(
                    JsonRpcResponse(
                        None,
                        JsonRpcInvalidRequestError('request batch is empty',
                                                   rawRequests), None))
                ctx.sendReply()
                return
        else:
            # From this point on we know it's always a list
            rawRequests = [rawRequests]

        # JSON parsed; handle each request
        requests = []
        for rawRequest in rawRequests:
            try:
                req = JsonRpcRequest.fromRawObject(rawRequest)
                requests.append(req)
            except JsonRpcError as err:
                ctx.addResponse(JsonRpcResponse(None, err, None))
            except Exception:
                ctx.addResponse(
                    JsonRpcResponse(None, JsonRpcInternalError(), None))

        ctx.setRequests(requests)

        # No request was built successfully, or the batch contains only notifications
        if ctx.counter == 0:
            ctx.sendReply()

        for request in requests:
            self._runRequest(ctx, request)

    def _runRequest(self, ctx, request):
        if self._threadFactory is None:
            self._serveRequest(ctx, request)
        else:
            try:
                self._threadFactory(partial(self._serveRequest, ctx, request))
            except Exception as e:
                self.log.exception("could not allocate request thread")
                ctx.requestDone(
                    JsonRpcResponse(None, JsonRpcInternalError(str(e)),
                                    request.id))

    def stop(self):
        self.log.info("Stopping JsonRPC Server")
        self._workQueue.put_nowait(None)
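
A hedged sketch of wiring a thread factory into the server, assuming this module's helpers (monotonic_time, the traceback decorator) are importable; bridge and cif are placeholders for vdsm's real Bridge and clientIF objects:

import threading

def thread_factory(callable_):
    # vdsm hands us a partial(self._serveRequest, ctx, request)
    threading.Thread(target=callable_, daemon=True).start()

bridge = cif = None   # placeholders only
server = JsonRpcServer(bridge, timeout=60, cif=cif,
                       threadFactory=thread_factory)

worker = threading.Thread(target=server.serve_requests, daemon=True)
worker.start()
# a transport layer would feed it with:
#   server.queueRequest((client, server_address, raw_json_message))
server.stop()   # enqueues the None sentinel; serve_requests() exits
worker.join()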
Code example #22
File: http.py Project: xunbian/Nbdler
class HTTPClient(AbstractClient):
    __connected = False

    def __init__(self, source, progress):
        self.source = source
        self.progress = progress

        self._response = None
        self._connection = None

        self._callback = None
        self.__signal = Queue()

    def geturl(self):
        return self.source.url

    def _buffer(self, buff):
        if buff:
            self.progress.buffer(buff)
            self._callback.put_nowait(SIGNAL_TASK_BUFF(len(buff)))

    def _build_connection(self):
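        # Choose the connection class by URL scheme and issue the GET for the
        # range of bytes still to be downloaded.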
        if self.source.scheme == 'http':
            client_handler = HTTPConnection
        elif self.source.scheme == 'https':
            client_handler = HTTPSConnection
        else:
            raise ValueError(
                'http client got an unexpected url protocol (%s).' %
                self.source.scheme)

        conn = client_handler(host=self.source.hostname,
                              port=self.source.port,
                              timeout=5)

        req_range = (self.progress.begin + self.progress.go_inc,
                     self.progress.end)
        path, headers = self.source.http_request_header(req_range)
        conn.request('GET', path, '', dict(headers))

        return conn

    def _conn_response(self, conn):
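        # Follow 3xx redirects by reconnecting to the new location, recursively.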
        res = conn.getresponse()
        sleep(0.01)
        if res.code in (301, 302, 303, 307):
            redurl = res.getheader('location', None)
            self.source.http_redirect(redurl)
            res.close()
            conn.sock.shutdown(socket.SHUT_RDWR)
            conn.close()
            conn = self._build_connection()
            return self._conn_response(conn)
        elif 400 <= res.code < 500:
            raise HTTP4XXError("(%d)URL: %s " % (res.code, self.source.url))
        elif res.code not in (200, 206):
            self._callback.put_nowait(SIGNAL_URL_STATUS(SIGNAL_UNKNOWN(self)))
            raise URLUnknownError()
        return conn, res

    def connect(self):
        conn = self._build_connection()
        conn, res = self._conn_response(conn)
        self._connection = conn
        self._response = res
        self.source.response(self.source.url, res.getheaders(), res.getcode(),
                             res.length)
        self.__connected = True
        return res

    def retrieve(self):

        self.progress.start()
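        # Announce a NORMAL status, then stream the body in 8 KB reads,
        # polling the signal queue between reads.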
        self._callback.put_nowait(
            SIGNAL_URL_STATUS(
                SIGNAL_NORMAL(ClientException(client=self, exception=None))))
        buff = b''
        while True:
            if not self.__signal.empty():
                signal = self.__signal.get()
                if signal.id == ID_TASK_SLICE:
                    putrange = self.progress.slice_check(signal.content)
                    self._callback.put_nowait(
                        SIGNAL_TASK_SLICE(
                            Slice(client=HTTPClient, range=putrange)))
                elif signal.id == ID_TASK_PAUSE:
                    self._buffer(buff)
                    self.__signal.task_done()
                    break
                elif signal.id == ID_WAIT:
                    sleep(signal.content)

                self.__signal.task_done()

            prv_len = len(buff)
            remain = self.progress.length - self.progress.go_inc
            try:
                if remain >= 8192:
                    buff += self._response.read(8192)
                elif remain > 0:
                    buff += self._response.read(remain)
                else:
                    self._buffer(buff)
                    break
            except Exception as e:
                # "Exception" already covers socket.gaierror, URLError,
                # HTTPError and socket.timeout.
                self._buffer(buff)
                self._exception_handler(e)
                break

            if len(buff) - prv_len == 0:
                self._buffer(buff)
                if self._response.chunked and not self._response.fp:
                    self.progress.force_to_finish_go()

                self._callback.put_nowait(SIGNAL_EMPTY_RECV(self))
                break
            self.progress.go(len(buff) - prv_len)

            if self.progress.go_inc >= self.progress.length:
                self._buffer(buff)
                break
            elif len(buff) >= 1048576:  # flush the local buffer every 1 MB
                self._buffer(buff)
                buff = b''

        self.progress.stop()

    def close(self):
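        # Detach the response and connection first, then shut the socket down
        # and close both.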
        response = self._response
        connection = self._connection
        self._response = None
        self._connection = None
        self.__connected = False

        if response:
            response.close()
        if connection:
            if connection.sock:
                connection.sock.shutdown(socket.SHUT_RDWR)
            connection.close()

    def getheader(self, name, default=None):
        return self._response.getheader(name, default)

    def run(self, callback):
        self._callback = callback

        if not self.__signal.empty():
            signal = self.__signal.get()
            if signal.id == ID_TASK_SLICE:
                putrange = self.progress.slice_check(signal.content)
                self._callback.put_nowait(
                    SIGNAL_TASK_SLICE(Slice(client=HTTPClient,
                                            range=putrange)))
            elif signal.id == ID_SWITCH:
                self.source = signal.content
            elif signal.id == ID_TASK_PAUSE:
                self._callback = None
                callback.put_nowait(SIGNAL_THREAD_END(self))
            elif signal.id == ID_WAIT:
                sleep(signal.content)
            self.__signal.task_done()

        try:
            if not self.__connected:
                self.connect()
            self.retrieve()
        except Exception as e:
            # "Exception" already covers the socket, URL and HTTP error types.
            self._exception_handler(e)
        finally:
            self.close()

        self._callback = None
        callback.put_nowait(SIGNAL_THREAD_END(self))

    def open_only(self, callback):
        self._callback = callback

        res = None
        try:
            res = self.connect()
        except Exception as e:
            # "Exception" already covers the socket, URL and HTTP error types.
            self._exception_handler(e)

        self._callback = None
        return res

    def _exception_handler(self, exception):
        client_exc = ClientException(client=self, exception=exception)
        if type(exception) in (URLError, HTTPError, socket.timeout):
            signal_type = SIGNAL_TIMEOUT
        elif type(exception) is socket.gaierror:
            signal_type = SIGNAL_GAIERROR
        elif type(exception) is HTTP4XXError:
            signal_type = SIGNAL_CRASH
        else:
            signal_type = SIGNAL_UNKNOWN

        self._callback.put_nowait(SIGNAL_URL_STATUS(signal_type(client_exc)))

    def install_callback(self, callback):
        self._callback = callback

    def clear_callback(self):
        self._callback = None

    def send_signal(self, signal):
        self.__signal.put_nowait(signal)

    def write_to_file(self, fp):
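        # Seek to the chunk's absolute offset and flush the buffered data there.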
        if not self.progress.is_empty_buff():
            fp.seek(self.progress.begin + self.progress.done_inc)
            fp.writelines(self.progress.fetch_buffer())

    def getprogress(self):
        return self.progress

    def getsource(self):
        return self.source

    def getresponse(self):
        return self._response

    def clear_signal(self):
        del self.__signal
        self.__signal = Queue()
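
To show how a controller might drive this client, here is a self-contained sketch of the two-queue protocol: run() reports on a callback queue while send_signal() feeds commands in the other direction. The Signal type and ID_* values below are hypothetical stand-ins for Nbdler internals that are not shown in this file.

import collections
from queue import Queue, Empty
from threading import Thread

Signal = collections.namedtuple('Signal', 'id content')
ID_TASK_PAUSE = 'pause'  # hypothetical ids standing in for Nbdler's ID_* constants
ID_THREAD_END = 'end'

signals = Queue()    # controller -> worker, like HTTPClient.send_signal()
callback = Queue()   # worker -> controller, like the queue passed to run()

def worker():
    while True:
        try:
            sig = signals.get_nowait()
        except Empty:
            sig = None
        if sig is not None and sig.id == ID_TASK_PAUSE:
            break
        # ... a real client would read one chunk here and report progress ...
    callback.put_nowait(Signal(ID_THREAD_END, None))

t = Thread(target=worker)
t.start()
signals.put_nowait(Signal(ID_TASK_PAUSE, None))  # like client.send_signal(...)
assert callback.get().id == ID_THREAD_END        # the controller's consume loop
t.join()

Keeping both directions on queues means the controller never touches the socket: blocking reads stay on the worker thread, just as retrieve() polls self.__signal between reads.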