Example no. 1
    def flush(self):
        if self._state == TERMINATE:
            return
        # cancel all tasks that haven't been accepted so that NACK is sent.
        for job in values(self._cache):
            if not job._accepted:
                job._cancel()

        # clear the outgoing buffer as the tasks will be redelivered by
        # the broker anyway.
        if self.outbound_buffer:
            self.outbound_buffer.clear()

        self.maintain_pool()

        try:
            # ...but we must continue writing the payloads we already started
            # to keep message boundaries.
            # The messages may be NACK'ed later if synack is enabled.
            if self._state == RUN:
                # flush outgoing buffers
                intervals = fxrange(0.01, 0.1, 0.01, repeatlast=True)
                owned_by = {}
                for job in values(self._cache):
                    writer = _get_job_writer(job)
                    if writer is not None:
                        owned_by[writer] = job

                while self._active_writers:
                    writers = list(self._active_writers)
                    for gen in writers:
                        if gen.__name__ == "_write_job" and gen_not_started(gen):
                            # hasn't started writing the job so can
                            # discard the task, but we must also remove
                            # it from the Pool._cache.
                            try:
                                job = owned_by[gen]
                            except KeyError:
                                pass
                            else:
                                # removes from Pool._cache
                                job.discard()
                            self._active_writers.discard(gen)
                        else:
                            try:
                                job = owned_by[gen]
                            except KeyError:
                                pass
                            else:
                                job_proc = job._write_to
                                if job_proc._is_alive():
                                    self._flush_writer(job_proc, gen)
                    # workers may have exited in the meantime.
                    self.maintain_pool()
                    sleep(next(intervals))  # don't busyloop
        finally:
            self.outbound_buffer.clear()
            self._active_writers.clear()
            self._active_writes.clear()
            self._busy_workers.clear()
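
The flush() method above depends on helpers that are not shown in the excerpt, most notably gen_not_started(), which decides whether a _write_job generator has started producing output. As a rough sketch of that check, using only the standard library (an assumption, not necessarily how the pool implements it):

import inspect

def gen_not_started(gen):
    # A generator that has never been resumed is still in the GEN_CREATED
    # state; after the first next() call it moves to GEN_SUSPENDED,
    # GEN_RUNNING or GEN_CLOSED.
    return inspect.getgeneratorstate(gen) == inspect.GEN_CREATED

Writers that have not started yet can be discarded together with their job, while writers that are mid-message must be driven to completion so the pipe to the worker process is not left holding a truncated payload.
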
Example no. 2
def skew(self, start=1.0, stop=None, step=1.0):
    it = fxrange(start, stop, step, repeatlast=True)
    for task in self.tasks:
        task.set(countdown=next(it))
    return self
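
skew() staggers the tasks of a group by assigning each an increasing countdown taken from fxrange(), so they do not all hit the workers at the same instant. A hypothetical usage sketch follows; the add task and the memory:// broker are assumptions made only to keep the snippet self-contained:

from celery import Celery, group

app = Celery('skew_demo', broker='memory://')

@app.task
def add(x, y):
    return x + y

# skew() mutates each signature's options in place and returns the group,
# so with start=1 and step=1 the countdowns come out as 1.0, 2.0, ..., 10.0.
g = group(add.s(i, i) for i in range(10)).skew(start=1, step=1)
print([sig.options['countdown'] for sig in g.tasks])
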
Example no. 3
def test_fxrange__no_repeatlast():
    assert list(fxrange(1.0, 3.0, 1.0)) == [1.0, 2.0, 3.0]
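
The test pins down the default behaviour: without repeatlast the sequence stops once stop has been reached (inclusively). The complementary case that both flush() and skew() rely on is repeatlast=True, where the final value is repeated forever so next() never raises StopIteration. A small sketch, assuming fxrange here is the helper from celery.utils.functional:

from itertools import islice

from celery.utils.functional import fxrange

# With repeatlast=True the iterator never terminates: after reaching
# stop it keeps yielding the last value, which is what lets flush()
# call next(intervals) on every pass of its back-off loop.
assert list(islice(fxrange(0.5, 1.5, 0.5, repeatlast=True), 5)) == [
    0.5, 1.0, 1.5, 1.5, 1.5,
]
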
Example no. 4
    def flush(self):
        if self._state == TERMINATE:
            return
        # cancel all tasks that haven't been accepted so that NACK is sent.
        for job in self._cache.values():
            if not job._accepted:
                job._cancel()

        # clear the outgoing buffer as the tasks will be redelivered by
        # the broker anyway.
        if self.outbound_buffer:
            self.outbound_buffer.clear()

        self.maintain_pool()

        try:
            # ...but we must continue writing the payloads we already started
            # to keep message boundaries.
            # The messages may be NACK'ed later if synack is enabled.
            if self._state == RUN:
                # flush outgoing buffers
                intervals = fxrange(0.01, 0.1, 0.01, repeatlast=True)
                owned_by = {}
                for job in self._cache.values():
                    writer = _get_job_writer(job)
                    if writer is not None:
                        owned_by[writer] = job

                while self._active_writers:
                    writers = list(self._active_writers)
                    for gen in writers:
                        if (gen.__name__ == '_write_job' and
                                gen_not_started(gen)):
                            # hasn't started writing the job so can
                            # discard the task, but we must also remove
                            # it from the Pool._cache.
                            try:
                                job = owned_by[gen]
                            except KeyError:
                                pass
                            else:
                                # removes from Pool._cache
                                job.discard()
                            self._active_writers.discard(gen)
                        else:
                            try:
                                job = owned_by[gen]
                            except KeyError:
                                pass
                            else:
                                job_proc = job._write_to
                                if job_proc._is_alive():
                                    self._flush_writer(job_proc, gen)
                    # workers may have exited in the meantime.
                    self.maintain_pool()
                    sleep(next(intervals))  # don't busyloop
        finally:
            self.outbound_buffer.clear()
            self._active_writers.clear()
            self._active_writes.clear()
            self._busy_workers.clear()
Example no. 5
def skew(self, start=1.0, stop=None, step=1.0):
    it = fxrange(start, stop, step, repeatlast=True)
    for task in self.tasks:
        task.set(countdown=next(it))
    return self
Example no. 6
def test_fxrange__no_repeatlast():
    assert list(fxrange(1.0, 3.0, 1.0)) == [1.0, 2.0, 3.0]
Example no. 7
def test_fxrange_no_repeatlast(self):
    self.assertEqual(list(fxrange(1.0, 3.0, 1.0)),
                     [1.0, 2.0, 3.0])