Example #1
def test_queue_api_params():
    '''Check that timeout and loops are taken into consideration in the .send method of the Communication Api
    for processes communicating via a queue as follows:

    1) timeout is respected by the put method of the queue
    2) num loops is respected by the put method of the queue. When the loops are exhausted, an exception is raised
    and the put method has been called loops times
    '''

    # (1)
    queue_factory = factory.QueueCommunication()
    timeout = 20
    parent = queue_factory.parent(timeout=timeout)
    with patch.object(parent.conn, 'put') as mock_put:
        parent.send(mpq_protocol.REQ_TEST_PARENT)

    message = [mpq_protocol.REQ_TEST_PARENT, parent.pid, None, None]
    mock_put.assert_called_with(message, timeout=timeout)

    # (2)
    parent = queue_factory.parent(timeout=timeout, loops=20)
    assert parent.loops == 20
    mock_put.reset_mock()
    with patch.object(parent.conn, 'put',
                      side_effect=queue.Full()) as mock_put:
        with pytest.raises(errors.QueuesCommunicationError):
            parent.send(mpq_protocol.REQ_TEST_PARENT)
    assert mock_put.call_count == parent.loops
Example #2
    def put(self, img, label, boxes, *args, **kwargs):
        # Check whether the params are consistent with the data we can store
        def check_consistency(name, arr, dtype, shape, byte_count):
            if type(arr) is not np.ndarray:
                raise ValueError(name + ' needs to be a numpy array')
            if arr.dtype != dtype:
                raise ValueError('{}\'s elements need to be of type {} but is {}' \
                                 .format(name, str(dtype), str(arr.dtype)))
            if arr.shape != shape:
                raise ValueError('{}\'s shape needs to be {} but is {}' \
                                 .format(name, shape, arr.shape))
            if len(arr.tobytes()) != byte_count:
                raise ValueError('{}\'s byte count needs to be {} but is {}' \
                                 .format(name, byte_count, len(arr.tobytes())))

        check_consistency('img', img, self.img_dtype, self.img_shape,
                          self.img_bc)
        check_consistency('label', label, self.label_dtype, self.label_shape,
                          self.label_bc)

        # If we can not get the slot within timeout we are actually full, not
        # empty
        try:
            arr_id = self.array_queue.get(*args, **kwargs)
        except q.Empty:
            raise q.Full()

        # Copy the arrays into the shared pool
        self.array_pool[arr_id][0][:] = img
        self.array_pool[arr_id][1][:] = label
        self.queue.put((arr_id, boxes), *args, **kwargs)
Example #3
 def make_request(self, target_name, request_payload: Dict):
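     # Build the "queue is full" error up front; it is raised both when the
     # queue already reports full and when put_nowait itself raises queue.Full.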
     full_exc = queue.Full(
         f"You made another request to {target_name} before it finished the first request."
         f"You must use await_response to wait for the response first.")
     if not self.requests[target_name]["requests"].full():
         try:
             self.requests[target_name]["requests"].put_nowait(
                 request_payload)
         except queue.Full:
             raise full_exc
     else:
         raise full_exc
Example #4
 def put_exclusive(self, item):
     """Adds the item to the queue only if unique in the queue.
     
     Args:
         item: The object to add to the queue.
     
     Raises:
         queue.Full if a duplicate item is in the queue.
     """
     if not self.contains(item):
         self.put(item)
     else:
         raise queue.Full('Duplicate item in queue')
Example #5
    def run(self):
        while True:

            password = input("Next Password: ")

            self.condition.acquire()

            try:
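                # Non-blocking put: on queue.Full, wait on the condition until
                # notified (note that the rejected password is not retried).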
                self.queue.put(Password(password), block=False)
                self.condition.notify()
            except queue.Full:
                self.condition.wait()

            self.condition.release()
Example #6
    def put(self, value, blocking=True, timeout=None):
        # wait for an empty slot
        if not self.putsem.acquire(blocking, timeout):
            raise queue.Full()

        # take specific slot
        with self.stoplock:  # no timeout but should go unnoticed
            offset = (self.stop.value % self.maxsize) * self.itemsize

            try:  # transfer values and update state
                struct.pack_into(self.fmt, self.values, offset, *value)
            except Exception:
                self.putsem.release()  # give the unused slot back on failure
                raise
            else:
                self.stop.value += 1
                self.getsem.release()
Example #7
 def respond(self, service_name: str, request: Dict):
     full_exc = queue.Full(
         "Response was not read by requesting process. You must read the response in the "
         "requesting process before making another request.")
     service = self.services[service_name]
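     # A response may be queued only if there is room in the responses queue
     # or a request is still marked active; otherwise the previous response
     # was never read by the requesting process.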
     if not service.responses.full() or service.active_request is not None:
         try:
             handler_response = service.handler(request)
             if handler_response is not None:
                 response = {
                     "id": request["id"],
                     "payload": handler_response
                 }
                 service.responses.put_nowait(response)
             service.reset_active_request()
         except queue.Full:
             raise full_exc
     else:
         raise full_exc
Example #8
    def put(
        self,
        item: Any,
        /
    ):
        meta = pickle.dumps(self.get_metadata(item)) if self.get_metadata else None
        item_bytes = self.encode_value(item) if self.encode_value else item
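        # The transaction and cursors come from a thread-local context; any
        # exception below is handed to self._abort along with the transaction.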
        try:
            txn, cursors, changed, implicit = \
                thread.local.context.get(self._env, write = True, internal = True)

            curlen = txn.stat(self._userdb[0])['entries']
            assert curlen <= self._maxsize_cached
            if curlen == self._maxsize_cached:
                raise queue.Full()

            cursor = cursors[self._userdb[0]]

            if not cursor.last():
                key_bytes = struct.pack('@N', 0)
            else:
                key_bytes = struct.pack(
                    '@N',
                    struct.unpack('@N', cursor.key())[0] + 1
                )
            assert cursor.put(
                key = key_bytes, value = item_bytes,
                append = True
            )
            if self.get_metadata:
                assert txn.put(
                    key = key_bytes, value = meta,
                    append = True, db = self._userdb[1]
                )

            if implicit:
                self._increment_version(cursors)
                txn.commit()
            else:
                changed.add(self)
        except BaseException as exc:
            self._abort(exc, txn, implicit)
Example #9
    def submit_workflow(
        self,
        workflow_id: str,
        state: WorkflowExecutionState,
        ignore_existing: bool = False,
    ):
        """Submit workflow. A submitted workflow can be executed later.

        Args:
            workflow_id: ID of the workflow.
            state: The initial state of the workflow.
            ignore_existing: Ignore existing executed workflows.
        """
        if workflow_id in self._workflow_executors:
            raise RuntimeError(
                f"Workflow[id={workflow_id}] is being executed.")
        if workflow_id in self._executed_workflows and not ignore_existing:
            raise RuntimeError(
                f"Workflow[id={workflow_id}] has been executed.")

        if state.output_task_id is None:
            raise ValueError(
                "No root DAG specified that generates output for the workflow."
            )

        wf_store = workflow_storage.WorkflowStorage(workflow_id)
        if (self._max_running_workflows != -1 and
                len(self._running_workflows) >= self._max_running_workflows):
            try:
                self._workflow_queue.put_nowait(workflow_id)
                self._queued_workflows[workflow_id] = asyncio.Future()
                wf_store.update_workflow_status(WorkflowStatus.PENDING)
            except queue.Full:
                # override with our error message
                raise queue.Full("Workflow queue has been full") from None
        else:
            self._running_workflows.add(workflow_id)
            wf_store.update_workflow_status(WorkflowStatus.RUNNING)
        # initialize executor
        self._workflow_executors[workflow_id] = WorkflowExecutor(state)
Example #10
	def _flush_output(self, finish = False):
		if self._outbuffer.full():
			raise queue.Full("No space in outbuffer to flush output")

		outbatch = []
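		# Drain elements from self._outbatch into outbatch until it runs dry or
		# the end marker arrives; the collected batch is then pushed to the
		# output buffer below.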

		while not self.output_finished.is_set() and not self._check_stop():
			try:
				batch_element = self._outbatch.get(timeout=1)
				if not isinstance(batch_element, OutputEndMarker):
					outbatch.extend(batch_element)
				else:
					outbatch.append(batch_element)
					self.output_finished.set()
			except queue.Empty:
				if finish:
					if pyparade.util.DEBUG:
						print(self.name + " is waiting for end marker")
				else: #all elements added to outbatch for now
					break

		if len(outbatch) > 0:
			self._outbuffer.put(outbatch)
			self._last_output = time.time()
Example #11
 def put_nowait(self, value):
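     # Lock-protected, non-blocking put onto the underlying buffer.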
     with self.__lock:
         if len(self.__queue) >= self.__maxSize:  # full once maxSize items are queued
             raise queue.Full()
         self.__queue.append(value)
Example #12
 def put(self, obj):
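     # Bounded put backed by a pipe: track the size in shared memory and raise
     # queue.Full instead of blocking once maxsize is reached.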
     with self._rlock:
         if self._maxsize and self._size.value >= self._maxsize:
             raise queue.Full()
         self._size.value += 1
         _write_object(self._pipe_w, obj)
Example #13
 def append(self, *args, **kwargs):
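     # Append first, then raise queue.Full once the target length is reached.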
     super().append(*args, **kwargs)
     if len(self) >= self._nr_target:
         raise queue.Full()
Example #14
 def write(self, item):
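     # Non-blocking write; re-raise queue.Full with a more descriptive message.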
     try:
         self.queue.put(item, block=False)
     except queue.Full:
         raise queue.Full(f"Full analysis fifo: {self.__name__}. This should never happen")
Example #15
 def my_put():
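     # Test double for Queue.put: raise queue.Full when the enclosing scope's
     # throw flag is set, otherwise succeed silently.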
     if throw:
         raise queue.Full('Err')
     else:
         return None