def test_body_received_on_post(self):
    """A queued POST job must deliver its body to the target server intact."""
    job_id = self._queue_job('post', '/test', body="this is a test body")

    def _never_called(handler):
        # A POST job must never be issued as a GET.
        ok_(False)

    captured_bodies = []

    def _do_post(handler):
        handler.send_response(200)
        handler.send_header('Content-type', 'text/text')
        handler.end_headers()
        handler.wfile.write("POST 200")
        length = int(handler.headers.getheader('content-length'))
        captured_bodies.append(handler.rfile.read(length))

    handler_cls = _make_handler_class('CheckBody', 200,
                                      do_GET=_never_called,
                                      do_POST=_do_post)
    self._start_server(handler_cls)
    queue_processor.process_with_pool(1, _read_default_db_ini())

    self._assert_done(job_id, queue_processor.SUCCESS,
                      "[JOBID %s] Job succeeded: POST 200" % job_id)
    eq_(1, len(captured_bodies))
    eq_("this is a test body", captured_bodies[0])
def test_body_received_on_post(self):
    """A queued POST job must deliver its body to the target server intact.

    NOTE(review): an identical definition of this test appears earlier in
    the file; this later one shadows it — consider removing the duplicate.
    """
    job_id = self._queue_job('post', '/test', body="this is a test body")

    def _never_called(srv):
        # The job is a POST, so a GET indicates a bug.
        ok_(False)

    bodies = []

    def _do_post(srv):
        srv.send_response(200)
        srv.send_header('Content-type', 'text/text')
        srv.end_headers()
        srv.wfile.write("POST 200")
        bodies.append(srv.rfile.read(int(srv.headers.getheader('content-length'))))

    self._start_server(
        _make_handler_class('CheckBody', 200,
                            do_GET=_never_called, do_POST=_do_post))
    queue_processor.process_with_pool(1, _read_default_db_ini())

    self._assert_done(job_id, queue_processor.SUCCESS,
                      "[JOBID %s] Job succeeded: POST 200" % job_id)
    eq_(1, len(bodies))
    eq_("this is a test body", bodies[0])
def _do_test_job_simple(self, resp_code, result_code, template_str):
    """Queue one GET job, serve it with a fixed HTTP status, and verify
    the resulting job state.

    ``template_str`` is %-formatted with ``job_id`` and ``resp_code``.
    """
    job_id = self._queue_job('get', '/test')
    handler_cls = _make_handler_class('Handle%d' % resp_code, resp_code)
    self._start_server(handler_cls)
    queue_processor.process_with_pool(1, _read_default_db_ini())
    expected_msg = template_str % dict(job_id=job_id, resp_code=resp_code)
    self._assert_done(job_id, result_code, expected_msg)
def _do_test_job_simple(self, resp_code, result_code, template_str):
    """Queue one GET job against a server that always answers ``resp_code``
    and assert the job finishes with ``result_code``.

    NOTE(review): an identical definition of this helper appears earlier in
    the file; this later one shadows it — consider removing the duplicate.
    """
    job_id = self._queue_job('get', '/test')
    self._start_server(
        _make_handler_class('Handle%d' % resp_code, resp_code))
    queue_processor.process_with_pool(1, _read_default_db_ini())
    self._assert_done(
        job_id, result_code,
        template_str % dict(job_id=job_id, resp_code=resp_code))
def test_multiple_jobs(self):
    """Two queued jobs (one POST, one GET) both complete in one run."""
    post_job = self._queue_job('post', '/test', body="this is a test body")
    get_job = self._queue_job('get', '/test')
    self._start_server(_make_handler_class('TestMultipleJobs', 200))
    # Even a single-process pool must work through every queued job.
    queue_processor.process_with_pool(1, _read_default_db_ini())
    self._assert_done(post_job, queue_processor.SUCCESS,
                      "[JOBID %s] Job succeeded: POST 200" % post_job)
    self._assert_done(get_job, queue_processor.SUCCESS,
                      "[JOBID %s] Job succeeded: GET 200" % get_job)
def test_respects_new_retry_delay_secs(self):
    """A 503 carrying ``x-bitlancer-retry-delay-secs`` replaces the job's
    stored retry delay."""
    def _reply_with_new_delay(handler):
        handler.send_response(503)
        handler.send_header('x-bitlancer-retry-delay-secs', '786')
        handler.end_headers()
        handler.wfile.write("GET 503")

    job_id = self._queue_job('get', '/test', retry_delay_secs=10)
    # Sanity-check the initially configured delay before processing.
    eq_(10, self._get_retry_delay_secs(job_id))
    self._start_server(
        _make_handler_class('TestRetryDelaySeconds', 503,
                            do_GET=_reply_with_new_delay))
    queue_processor.process_with_pool(1, _read_default_db_ini())
    eq_(786, self._get_retry_delay_secs(job_id))
def test_time_out(self):
    """A handler that outsleeps the job's ``timeout_secs`` yields a
    temporary failure.

    Timing-based and therefore somewhat fragile, but better than no
    coverage at all.
    """
    def _slow_get(handler):
        time.sleep(5)

    job_id = self._queue_job('get', '/test', timeout_secs=1)
    self._start_server(
        _make_handler_class('TestTimeoutHandler', 503, do_GET=_slow_get))
    queue_processor.process_with_pool(1, _read_default_db_ini())
    self._assert_done(job_id, queue_processor.TEMPORARY_FAILURE,
                      "[JOBID %s] Job failed due to timeout" % job_id)
def test_time_out(self):
    """Jobs whose request exceeds ``timeout_secs`` fail temporarily.

    Timing-based, so inherently a little fragile — still better than
    leaving the timeout path untested.

    NOTE(review): an identical definition of this test appears earlier in
    the file; this later one shadows it — consider removing the duplicate.
    """
    def _sleepy_handler(srv):
        time.sleep(5)

    job_id = self._queue_job('get', '/test', timeout_secs=1)
    handler_cls = _make_handler_class('TestTimeoutHandler', 503,
                                      do_GET=_sleepy_handler)
    self._start_server(handler_cls)
    queue_processor.process_with_pool(1, _read_default_db_ini())
    self._assert_done(
        job_id, queue_processor.TEMPORARY_FAILURE,
        "[JOBID %s] Job failed due to timeout" % job_id)
def test_multiple_jobs(self):
    """Both a POST job and a GET job are processed by a one-worker pool.

    NOTE(review): an identical definition of this test appears earlier in
    the file; this later one shadows it — consider removing the duplicate.
    """
    first_id = self._queue_job('post', '/test', body="this is a test body")
    second_id = self._queue_job('get', '/test')
    self._start_server(_make_handler_class('TestMultipleJobs', 200))
    # Running with 1 process must still drain the whole queue.
    queue_processor.process_with_pool(1, _read_default_db_ini())
    self._assert_done(
        first_id, queue_processor.SUCCESS,
        "[JOBID %s] Job succeeded: POST 200" % first_id)
    self._assert_done(
        second_id, queue_processor.SUCCESS,
        "[JOBID %s] Job succeeded: GET 200" % second_id)
def test_respects_new_retry_delay_secs(self):
    """The server can override a job's retry delay via the
    ``x-bitlancer-retry-delay-secs`` response header on a 503.

    NOTE(review): an identical definition of this test appears earlier in
    the file; this later one shadows it — consider removing the duplicate.
    """
    def _override_delay(srv):
        srv.send_response(503)
        srv.send_header('x-bitlancer-retry-delay-secs', '786')
        srv.end_headers()
        srv.wfile.write("GET 503")

    job_id = self._queue_job('get', '/test', retry_delay_secs=10)
    eq_(10, self._get_retry_delay_secs(job_id))  # delay as originally queued
    handler_cls = _make_handler_class('TestRetryDelaySeconds', 503,
                                      do_GET=_override_delay)
    self._start_server(handler_cls)
    queue_processor.process_with_pool(1, _read_default_db_ini())
    eq_(786, self._get_retry_delay_secs(job_id))
def test_retries(self):
    """``remaining_retries`` is consumed on temporary failure, and a job
    with zero retries left is never re-worked."""
    job_id = self._queue_job('get', '/test', remaining_retries=1)
    self._start_server(_make_handler_class('TestRetriesHandler', 503))

    # First pass: the 503 produces a temporary failure.
    queue_processor.process_with_pool(1, _read_default_db_ini())
    self._assert_done(
        job_id, queue_processor.TEMPORARY_FAILURE,
        "[JOBID %s] Job failed temporarily: GET 503" % job_id)

    # Second pass consumes the single remaining retry.
    queue_processor.process_with_pool(1, _read_default_db_ini())
    started_after_retry = self._get_last_started_at(job_id)
    eq_(0, self._get_remaining_retries(job_id))

    # Third pass: no retries remain, so the job must not run again.
    queue_processor.process_with_pool(1, _read_default_db_ini())
    eq_(0, self._get_remaining_retries(job_id))
    started_after_exhaustion = self._get_last_started_at(job_id)
    eq_(started_after_retry, started_after_exhaustion)
def test_delay_secs(self):
    """After a temporary failure a job is not retried until
    ``retry_delay_secs`` has elapsed.

    Timing-based and therefore somewhat fragile, but better than no
    coverage at all.
    """
    job_id = self._queue_job('get', '/test', retry_delay_secs=3)
    self._start_server(_make_handler_class('TestDelaysHandler', 503))

    queue_processor.process_with_pool(1, _read_default_db_ini())
    self._assert_done(
        job_id, queue_processor.TEMPORARY_FAILURE,
        "[JOBID %s] Job failed temporarily: GET 503" % job_id)
    first_start = self._get_last_started_at(job_id)

    # Still safely inside the 3-second delay window: no re-work expected.
    queue_processor.process_with_pool(1, _read_default_db_ini())
    eq_(first_start, self._get_last_started_at(job_id))

    # Wait past the window; now the job should be picked up again.
    time.sleep(5)
    queue_processor.process_with_pool(1, _read_default_db_ini())
    ok_(first_start != self._get_last_started_at(job_id))