def test_password_not_given(self):

    class MyChangePasswordNotRequired(self.service_class):
        class SimpleIO(object):
            input_required = ('id',)

        def handle(self):
            pass

    class MyChangePasswordRequired(self.service_class):
        class SimpleIO(object):
            input_required = ('id', 'password1', 'password2')

        def handle(self):
            pass

    request1 = {'id': rand_int()}
    response_data1 = {}
    self.check_impl(MyChangePasswordNotRequired, request1, response_data1, 'ignored')

    password1 = password2 = rand_string()
    request2 = {'id': rand_int(), 'password1': password1, 'password2': password2}
    response_data2 = {}
    self.check_impl(MyChangePasswordRequired, request2, response_data2, 'ignored')
def get_response_data(self):
    return Bunch({
        'id': rand_int(),
        'name': self.name,
        'host': rand_string(),
        'port': rand_int(),
        'queue_manager': rand_int(),
        'channel': rand_string(),
        'cache_open_send_queues': rand_bool(),
        'cache_open_receive_queues': rand_bool(),
        'use_shared_connections': rand_bool(),
        'ssl': rand_bool(),
        'needs_mcd': rand_bool(),
        'max_chars_printed': rand_int(),
        'ssl_cipher_spec': rand_string(),
        'ssl_key_repository': rand_string(),
    })
def test_publish_custom_attrs(self):
    self._check_publish(**{
        'mime_type': rand_string(),
        'priority': rand_int(),
        'expiration': rand_int(1000, 2000),
        'msg_id': rand_string(),
    })
def test_invoke_retry_ok(self):
    target = 'target_{}'.format(rand_string())
    callback = 'callback_{}'.format(rand_string())
    callback_impl_name = 'callback_impl_name_{}'.format(rand_string())
    cid = new_cid()
    expected_result = rand_string()

    invoking_service = DummyTargetService(callback, callback_impl_name, cid, expected_result)
    ir = InvokeRetry(invoking_service)

    kwargs = {
        'async_fallback': True,
        'callback': callback,
        'context': {rand_string(): rand_string()},
        'repeats': rand_int(),
        'seconds': rand_int(),
        'minutes': 0,
        'cid': cid,
    }

    result = ir.invoke(target, 1, 2, 3, **kwargs)

    self.assertEquals(expected_result, result)
    self.assertEquals(len(invoking_service.invoke_args), 4)
    self.assertEquals(invoking_service.invoke_args, (target, 1, 2, 3))
    self.assertEquals(invoking_service.invoke_kwargs, {'cid': cid})
def get_response_data(self):
    return Bunch({
        'id': rand_int(),
        'name': self.name,
        'is_active': rand_bool(),
        'is_internal': rand_bool(),
        'url_path': rand_string(),
        'service_id': rand_int(),
        'service_name': rand_string(),
        'security_id': rand_int(),
        'security_name': rand_int(),
        'sec_type': rand_string(),
        'method': rand_string(),
        'soap_action': rand_string(),
        'soap_version': rand_string(),
        'data_format': rand_string(),
        'host': rand_string(),
        'ping_method': rand_string(),
        'pool_size': rand_int(),
    })
def test_delete(self):
    test_wait_time = 0.5
    job_sleep_time = 10
    job_max_repeats = 30

    job1 = Job(rand_int(), 'a', SCHEDULER.JOB_TYPE.INTERVAL_BASED, Interval(seconds=0.1), max_repeats=job_max_repeats)
    job1.wait_sleep_time = job_sleep_time

    job2 = Job(rand_int(), 'b', SCHEDULER.JOB_TYPE.INTERVAL_BASED, Interval(seconds=0.1), max_repeats=job_max_repeats)
    job2.wait_sleep_time = job_sleep_time

    scheduler = Scheduler(dummy_callback)
    scheduler.lock = RLock()
    scheduler.iter_cb = iter_cb
    scheduler.iter_cb_args = (scheduler, datetime.utcnow() + timedelta(seconds=test_wait_time))

    scheduler.create(job1)
    scheduler.create(job2)
    scheduler.run()

    scheduler.unschedule(job1)

    self.assertIn(job2, scheduler.jobs)
    self.assertNotIn(job1, scheduler.jobs)
    self.assertFalse(job1.keep_running)

    # run - 1
    # create - 2
    # delete - 1
    # on_max_repeats_reached - 0 (because of how long it takes to run job_max_repeats with test_wait_time)
    # 1+2+1 = 4
    self.assertEquals(scheduler.lock.called, 4)
def test_lock_ok(self):
    """ Successfully grabs a service lock.
    """
    my_kvdb = FakeKVDB()
    my_kvdb.conn.setnx_return_value = True

    lock_name = rand_string()
    expires = 500 + rand_int() # At least 500 seconds, which gives DummyService.handle that much time to complete
    timeout = rand_int()

    class DummyService(Service):
        kvdb = my_kvdb

        def handle(self):
            with self.lock(lock_name, expires, timeout):
                pass

    instance = DummyService()
    instance.handle()

    eq_(my_kvdb.conn.delete_args, KVDB.LOCK_SERVICE_PREFIX + lock_name)
    eq_(my_kvdb.conn.expire_args, (KVDB.LOCK_SERVICE_PREFIX + lock_name, expires))

    # First arg is the lock_name that can be checked directly but the other
    # one is the expiration time that we can check only approximately,
    # anything within a 3-second range is OK. The value of 3 is the maximum
    # time allowed for the execution of DummyService's handle method, which
    # is way more than needed, but let's use 3 to be on the safe side when
    # the test is run on a very slow system.
    eq_(my_kvdb.conn.setnx_args[0], KVDB.LOCK_SERVICE_PREFIX + lock_name)

    expires_approx = time() + expires
    self.assertAlmostEquals(my_kvdb.conn.setnx_args[1], expires_approx, delta=3)
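# A minimal sketch of the FakeKVDB test double assumed above. The attribute
# names (conn, setnx_return_value, setnx_args, expire_args, delete_args) are
# inferred purely from how the test uses them; the actual test-support class
# may be defined differently.
class FakeKVDB(object):

    class _FakeConn(object):
        def __init__(self):
            self.setnx_return_value = True
            self.setnx_args = None
            self.expire_args = None
            self.delete_args = None

        def setnx(self, *args):
            # Records the (key, expiration) pair and reports whether the lock was acquired
            self.setnx_args = args
            return self.setnx_return_value

        def expire(self, *args):
            # Records the (key, expires) pair the lock sets
            self.expire_args = args

        def delete(self, name):
            # Records the key deleted when the lock is released
            self.delete_args = name

    def __init__(self):
        self.conn = self._FakeConn()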
def test_response(self):
    request = {'cluster_id': rand_int(), 'name': rand_string()}

    expected_id = rand_int()
    expected_name = rand_string()
    expected_is_active = rand_bool()
    expected_impl_name = rand_string()
    expected_is_internal = rand_bool()

    service = Service()
    service.id = expected_id
    service.name = expected_name
    service.is_active = expected_is_active
    service.impl_name = expected_impl_name
    service.is_internal = expected_is_internal

    expected = Expected()
    expected.add(service)

    instance = self.invoke(GetByName, request, expected)
    response = Bunch(loads(instance.response.payload.getvalue())['response'])

    eq_(response.id, expected_id)
    eq_(response.name, expected_name)
    eq_(response.is_active, expected_is_active)
    eq_(response.impl_name, expected_impl_name)
    eq_(response.is_internal, expected_is_internal)
    eq_(response.usage, 0)
def test_not_implemented_error(self):
    inner = FakeInnerResponse({}, rand_int(), rand_string(), rand_int())
    response_data = (inner, rand_bool(), rand_int(), rand_int(), None)

    self.assertRaises(NotImplementedError, _Response, *response_data)
    self.assertRaises(NotImplementedError, _StructuredResponse(*response_data).load_func)
    self.assertRaises(NotImplementedError, _StructuredResponse(*response_data).set_has_data)
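# A minimal sketch of the FakeInnerResponse test double used throughout the
# client tests in this section. Its (headers, ok, text, status_code)
# signature is inferred from the constructor calls here and below; the real
# helper may carry more state.
class FakeInnerResponse(object):
    def __init__(self, headers, ok, text, status_code):
        self.headers = headers
        self.ok = ok
        self.text = text
        self.status_code = status_code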
def test_response(self):
    request = {'cluster_id': rand_int()}

    expected_keys = get_data().keys()
    expected_data = tuple(get_data() for x in range(rand_int(10)))
    expected = Expected()

    for datum in expected_data:
        item = ChannelAMQP()
        for key in expected_keys:
            value = getattr(datum, key)
            setattr(item, key, value)
        expected.add(item)

    instance = self.invoke(GetList, request, expected)
    response = loads(instance.response.payload.getvalue())['response']

    for idx, item in enumerate(response):
        expected = expected_data[idx]
        given = Bunch(item)

        for key in expected_keys:
            given_value = getattr(given, key)
            expected_value = getattr(expected, key)
            eq_(given_value, expected_value)
def get_response_data(self):
    return Bunch({
        'id': rand_int(),
        'name': rand_string(),
        'is_active': rand_bool(),
        'def_id': rand_int(),
        'delivery_mode': rand_int(),
        'priority': rand_int(),
        'def_name': rand_string(),
        'content_type': rand_string(),
        'content_encoding': rand_string(),
        'expiration': rand_int(),
        'user_id': rand_string(),
        'app_id': rand_string(),
    })
def get_job(name=None, interval_in_seconds=None, start_time=None, max_repeats=None, callback=None):
    name = name or rand_string()
    interval_in_seconds = interval_in_seconds or rand_int()
    start_time = start_time or rand_date_utc()
    callback = callback or dummy_callback

    return Job(rand_int(), name, SCHEDULER.JOB_TYPE.INTERVAL_BASED, Interval(in_seconds=interval_in_seconds),
        start_time, callback, max_repeats=max_repeats)
def test_get_callback_consumers(self):
    ps = RedisPubSub(self.kvdb, self.key_prefix)
    msg_value = '"msg_value"'

    topic = Topic('/test/delete')
    ps.add_topic(topic)

    producer = Client('Producer', 'producer')
    ps.add_producer(producer, topic)

    id1 = 'Consumer CB1'
    name1 = 'consumer-cb1'
    sub_key1 = new_cid()
    callback_id1 = rand_int()

    id2 = 'Consumer CB2'
    name2 = 'consumer-cb2'
    sub_key2 = new_cid()
    callback_id2 = rand_int()

    consumer_cb1 = Consumer(
        id1, name1, sub_key=sub_key1, delivery_mode=PUB_SUB.DELIVERY_MODE.CALLBACK_URL.id, callback_id=callback_id1)

    consumer_cb2 = Consumer(
        id2, name2, sub_key=sub_key2, delivery_mode=PUB_SUB.DELIVERY_MODE.CALLBACK_URL.id, callback_id=callback_id2)

    consumer_pull = Consumer('Consumer pull', 'consumer-pull', sub_key=new_cid(), delivery_mode=PUB_SUB.DELIVERY_MODE.PULL.id)

    consumer_inactive = Consumer(
        'Consumer pull', 'consumer-pull', is_active=False, sub_key=new_cid(), delivery_mode=PUB_SUB.DELIVERY_MODE.PULL.id)

    ps.add_consumer(consumer_cb1, topic)
    ps.add_consumer(consumer_cb2, topic)
    ps.add_consumer(consumer_pull, topic)     # This one should not be returned because it's a pull one
    ps.add_consumer(consumer_inactive, topic) # This one should not be returned because it's inactive

    # Only 2 are returned, the rest won't make it
    consumers = list(ps.get_callback_consumers())
    eq_(len(consumers), 2)

    # Sort by each consumer's ID, i.e. in lexicographical order
    consumers.sort(key=attrgetter('id'))

    consumer1 = consumers[0]
    eq_(consumer1.id, id1)
    eq_(consumer1.name, name1)
    eq_(consumer1.is_active, True)
    eq_(consumer1.sub_key, sub_key1)
    eq_(consumer1.callback_id, callback_id1)

    consumer2 = consumers[1]
    eq_(consumer2.id, id2)
    eq_(consumer2.name, name2)
    eq_(consumer2.is_active, True)
    eq_(consumer2.sub_key, sub_key2)
    eq_(consumer2.callback_id, callback_id2)
def test_publish_custom_attrs(self):
    self._check_publish(**{
        'mime_type': rand_string(),
        'priority': rand_int(),
        'expiration': rand_int(1000, 2000),
        'msg_id': rand_string(),
    })
def test_job_greenlets(self):
    data = {'spawned': [], 'stopped': []}

    class FakeGreenlet(object):
        def __init__(self, run):
            self.run = self._run = run

        def kill(self, *args, **kwargs):
            data['stopped'].append([self, args, kwargs])

    def spawn(job):
        g = FakeGreenlet(job)
        data['spawned'].append(g)
        return g

    with patch('gevent.spawn', spawn):
        test_wait_time = 0.5
        job_sleep_time = 10
        job_max_repeats = 30

        job1 = Job(rand_int(), 'a', SCHEDULER.JOB_TYPE.INTERVAL_BASED, Interval(seconds=0.1), max_repeats=job_max_repeats)
        job1.wait_sleep_time = job_sleep_time

        job2 = Job(rand_int(), 'b', SCHEDULER.JOB_TYPE.INTERVAL_BASED, Interval(seconds=0.1), max_repeats=job_max_repeats)
        job2.wait_sleep_time = job_sleep_time

        scheduler = Scheduler(dummy_callback)
        scheduler.lock = RLock()
        scheduler.iter_cb = iter_cb
        scheduler.iter_cb_args = (scheduler, datetime.utcnow() + timedelta(seconds=test_wait_time))

        scheduler.create(job1, spawn=False)
        scheduler.create(job2, spawn=False)
        scheduler.run()

        self.assertEquals(scheduler.job_greenlets[job1.name]._run, job1.run)
        self.assertEquals(scheduler.job_greenlets[job2.name]._run, job2.run)

        self.assertTrue(job1.keep_running)
        self.assertTrue(job2.keep_running)

        scheduler.unschedule(job1)

        self.assertFalse(job1.keep_running)
        self.assertTrue(job2.keep_running)

        self.assertNotIn(job1.name, scheduler.job_greenlets)
        self.assertEquals(scheduler.job_greenlets[job2.name]._run, job2.run)

        self.assertEquals(len(data['stopped']), 1)
        g, args, kwargs = data['stopped'][0]

        # That's how we know it was job1 that was deleted, not job2
        self.assertIs(g.run.im_func, job1.run.im_func)
        self.assertIs(args, ())
        self.assertDictEqual(kwargs, {'timeout': 2.0})
def test_retry_limit_reached_msg(self):
    retry_repeats = rand_int()
    service_name = rand_string()
    retry_seconds = rand_int()
    orig_cid = rand_string()

    msg = retry_limit_reached_msg(retry_repeats, service_name, retry_seconds, orig_cid)
    self.assertEquals(msg, '({}/{}) Retry limit reached for:`{}`, retry_seconds:`{}`, orig_cid:`{}`'.format(
        retry_repeats, retry_repeats, service_name, retry_seconds, orig_cid))
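# For reference, a sketch of retry_limit_reached_msg consistent with the
# assertion above - the message format is pinned down by the test itself,
# but the real function lives in the invoke-retry module and may be
# structured differently.
def retry_limit_reached_msg(retry_repeats, service_name, retry_seconds, orig_cid):
    # repeats is shown twice: the limit is reached, so attempts made == attempts allowed
    return '({}/{}) Retry limit reached for:`{}`, retry_seconds:`{}`, orig_cid:`{}`'.format(
        retry_repeats, retry_repeats, service_name, retry_seconds, orig_cid)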
def setUp(self):
    self.service_class = Create
    self.sio = self.service_class.SimpleIO
    self.id = rand_int()
    self.def_id = rand_int()
    self.name = rand_string()
    self.mock_data = {
        'odb': [
            {'session.query.filter.filter.filter.first': False},
            {'session.query.filter.filter.first': Service()},
        ],
    }
def test_retry_failed_msg(self):
    so_far = rand_int()
    retry_repeats = rand_int()
    service_name = rand_string()
    retry_seconds = rand_int()
    orig_cid = rand_string()

    try:
        raise_exception()
    except Exception, e:
        pass
def setUp(self):
    self.service_class = Edit
    self.sio = self.service_class.SimpleIO
    self.id = rand_int()
    self.def_id = rand_int()
    self.name = rand_string()
    self.mock_data = {
        'odb': [
            {'session.query.filter.filter.filter.filter.first': False},
            {'session.query.filter.filter.first': Service()},
            {'session.query.filter_by.one': ChannelAMQP(self.id)},
        ]
    }
def get_request_data(self): return { "id": rand_int(), "cluster_id": rand_int(), "name": self.name, "is_active": rand_bool(), "address": rand_string(), "socket_type": rand_string(), "service": rand_string(), "sub_key": rand_string(), "data_format": rand_string(), }
def test_subscribe(self):
    client_id, client_name = rand_int(), rand_string()
    client = Client(client_id, client_name)
    topics = rand_string(rand_int())

    sub_key = self.api.subscribe(client.id, topics)

    self.assertEquals(self.api.impl.sub_to_cons[sub_key], client_id)
    self.assertEquals(self.api.impl.cons_to_sub[client_id], sub_key)
    self.assertEquals(sorted(self.api.impl.cons_to_topic[client_id]), sorted(topics))

    for topic in topics:
        self.assertIn(client_id, self.api.impl.topic_to_cons[topic])
def test_client_custom_attrs(self):
    id, name, is_active = rand_int(), rand_string(), rand_bool()
    client = Client(id, name, is_active)

    self.assertEquals(client.id, id)
    self.assertEquals(client.name, name)
    self.assertEquals(client.is_active, is_active)
def test_on_job_executed_cb(self):
    data = {'runs': [], 'ctx': []}

    def get_context():
        ctx = {'name': rand_string(), 'type': SCHEDULER.JOB_TYPE.INTERVAL_BASED}
        data['ctx'].append(ctx)
        return ctx

    def on_job_executed_cb(ctx):
        data['runs'].append(ctx)

    test_wait_time = 0.5
    job_sleep_time = 0.1
    job_max_repeats = 10

    job = Job(rand_int(), 'a', SCHEDULER.JOB_TYPE.INTERVAL_BASED, Interval(seconds=0.1), max_repeats=job_max_repeats)
    job.wait_sleep_time = job_sleep_time
    job.get_context = get_context

    scheduler = Scheduler(dummy_callback)
    scheduler.lock = RLock()
    scheduler.iter_cb = iter_cb
    scheduler.iter_cb_args = (scheduler, datetime.utcnow() + timedelta(seconds=test_wait_time))
    scheduler.on_job_executed_cb = on_job_executed_cb

    scheduler.create(job, spawn=False)
    scheduler.run()

    self.assertEquals(len(data['runs']), len(data['ctx']))

    for idx, item in enumerate(data['runs']):
        self.assertEquals(data['ctx'][idx], item)
def test_client_ok(self):
    cid = new_cid()
    headers = {'x-zato-cid': cid}
    ok = True
    _rand = rand_string()
    soap_action = rand_string()

    text = """<soapenv:Envelope xmlns:soapenv="http://schemas.xmlsoap.org/soap/envelope/">
        <soapenv:Body>
            <abc>{}</abc>
        </soapenv:Body>
    </soapenv:Envelope>""".format(_rand).strip()

    status_code = rand_int()

    client = self.get_client(FakeInnerResponse(headers, ok, text, status_code))
    response = client.invoke(soap_action)

    expected_response_data = """<abc xmlns:soapenv="http://schemas.xmlsoap.org/soap/envelope/">{}</abc>""".format(
        _rand).strip()

    eq_(response.details, None)
    eq_(response.ok, ok)
    eq_(response.inner.text, text)
    eq_(etree.tostring(response.data), expected_response_data)
    eq_(response.has_data, True)
    eq_(response.cid, cid)
def test_client(self):
    cid = new_cid()
    headers = {'x-zato-cid': cid}
    ok = True
    status_code = rand_int()

    service_name = rand_string()
    service_response_name = '{}_response'.format(service_name)
    service_response_payload = {'service_id': 5207, 'has_wsdl': True}
    service_response_dict = {'zato_service_has_wsdl_response': service_response_payload}
    service_response = dumps(service_response_dict).encode('base64')

    text = dumps({
        'zato_env': {'result': ZATO_OK, 'details': ''},
        service_response_name: {'response': service_response}
    })

    client = self.get_client(FakeInnerResponse(headers, ok, text, status_code))
    response = client.invoke(service_name, '')

    eq_(response.ok, ok)
    eq_(response.inner.text, text)
    eq_(response.data.items(), service_response_payload.items())
    eq_(response.has_data, True)
    eq_(response.cid, cid)
def test_repr(self):

    class MyResponse(_Response):
        def init(self):
            pass

    cid = new_cid()
    ok = True
    text = rand_string()
    status_code = rand_int()
    inner_params = ({'x-zato-cid': cid}, ok, text, status_code)

    max_repr = ((3, 3), (len(text), CID_NO_CLIP))

    for (max_response_repr, max_cid_repr) in max_repr:

        inner = FakeInnerResponse(*inner_params)
        response = MyResponse(inner, False, max_response_repr, max_cid_repr, None)
        response.ok = ok

        cid_ellipsis = '' if max_cid_repr == CID_NO_CLIP else '..'

        expected = 'ok:[{}] inner.status_code:[{}] cid:[{}{}{}], inner.text:[{}]>'.format(
            ok, status_code, cid[:max_cid_repr], cid_ellipsis, cid[-max_cid_repr:], text[:max_response_repr])

        eq_(repr(response).endswith(expected), True)
def test_delete_metadata(self):
    payload, topic, producer, ctx = self._publish_move(move=False)

    consumer = Consumer(rand_int(), rand_string())
    self.api.add_consumer(consumer, topic)

    sub_key = self.api.subscribe(consumer.id, topic.name)
    self.api.impl.move_to_target_queues()

    self._check_consumer_queue_before_get(ctx, sub_key)
    self._check_get(ctx, sub_key, topic, producer, consumer)

    self.api.acknowledge(sub_key, ctx.msg.msg_id)

    # Ok, we should now have metadata for the consumer, producer and topic.
    last_seen_consumer = self.api.impl.kvdb.hkeys(self.api.impl.LAST_SEEN_CONSUMER_KEY)
    last_seen_producer = self.api.impl.kvdb.hkeys(self.api.impl.LAST_SEEN_PRODUCER_KEY)
    last_pub_time = self.api.impl.kvdb.hkeys(self.api.impl.LAST_PUB_TIME_KEY)

    self.assertIn(str(consumer.id), last_seen_consumer)
    self.assertIn(str(producer.id), last_seen_producer)
    self.assertIn(topic.name, last_pub_time)

    self.api.impl.delete_producer(producer, topic)
    last_seen_producer = self.api.impl.kvdb.hkeys(self.api.impl.LAST_SEEN_PRODUCER_KEY)
    self.assertNotIn(str(producer.id), last_seen_producer)

    self.api.impl.delete_consumer(consumer, topic)
    last_seen_consumer = self.api.impl.kvdb.hkeys(self.api.impl.LAST_SEEN_CONSUMER_KEY)
    self.assertNotIn(str(consumer.id), last_seen_consumer)

    self.api.impl.delete_topic(topic)
    last_pub_time = self.api.impl.kvdb.hkeys(self.api.impl.LAST_PUB_TIME_KEY)
    self.assertNotIn(topic.name, last_pub_time)
def test_client_defaults(self):
    id, name = rand_int(), rand_string()
    client = Client(id, name)

    self.assertEquals(client.id, id)
    self.assertEquals(client.name, name)
    self.assertEquals(client.is_active, True)
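# A hedged sketch of the Client constructor semantics that the two tests
# above pin down: is_active defaults to True. The hypothetical name
# ClientSketch is used so as not to suggest this is the real pub/sub class,
# which may define more than this.
class ClientSketch(object):
    def __init__(self, id, name, is_active=True):
        self.id = id
        self.name = name
        self.is_active = is_active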
def test_client(self):
    cid = new_cid()
    headers = {'x-zato-cid': cid}
    ok = True

    env = {
        'details': rand_string(),
        'result': ZATO_OK,
        'cid': cid
    }

    sio_payload_key = rand_string()
    sio_payload = {rand_string(): rand_string()}

    sio_response = {
        'zato_env': env,
        sio_payload_key: sio_payload
    }

    text = dumps(sio_response)
    status_code = rand_int()

    client = self.get_client(FakeInnerResponse(headers, ok, text, status_code))
    response = client.invoke()

    eq_(response.ok, ok)
    eq_(response.inner.text, text)
    eq_(response.data.items(), sio_response[sio_payload_key].items())
    eq_(response.has_data, True)
    eq_(response.cid, cid)
    eq_(response.cid, sio_response['zato_env']['cid'])
    eq_(response.details, sio_response['zato_env']['details'])
def test_get_retry_settings_has_invalid_async_callback(self):
    ir = InvokeRetry(None)

    callback = [None, rand_string()]
    repeats = [None, rand_int()]
    target = rand_string()

    for callback_item in callback:
        for repeats_item in repeats:
            kwargs = {
                'async_fallback': True,
                'callback': callback_item,
                'repeats': repeats_item,
            }

            try:
                ir._get_retry_settings(target, **kwargs)
            except ValueError, e:
                for name in 'callback', 'repeats':
                    if name in e.message:
                        self.assertEquals(e.message, 'Could not invoke `{}`, `{}` was not provided ({})'.format(
                            target, name, None))
            else:
                # No exception was raised, so both values must have been given
                self.assertTrue(callback_item is not None)
                self.assertTrue(repeats_item is not None)
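# A hedged sketch of the validation the test above exercises, inferred
# solely from its assertions - the real _get_retry_settings method also
# computes the actual retry configuration and may be structured differently.
def _get_retry_settings_sketch(target, **kwargs):
    if kwargs.get('async_fallback'):
        # With an async fallback, both a callback and a repeat count are mandatory
        for name in ('callback', 'repeats'):
            value = kwargs.get(name)
            if not value:
                raise ValueError('Could not invoke `{}`, `{}` was not provided ({})'.format(
                    target, name, value))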
def get_request_data(self):
    return {'cluster_id': rand_int()}
def get_response_data(self):
    return Bunch({
        'id': rand_int(),
        'name': rand_string(),
        'is_active': rand_bool(),
        'queue': rand_string(),
        'consumer_tag_prefix': rand_string(),
        'def_name': rand_string(),
        'def_id': rand_int(),
        'service_name': rand_string(),
        'data_format': rand_string(),
    })
def get_response_data(self):
    return Bunch({'id': rand_int(), 'system': rand_string(), 'key': rand_string(), 'value': rand_string()})
def get_request_data(self):
    return {
        'cluster_id': rand_int(),
        'name': self.name,
        'is_active': rand_bool(),
        'def_id': self.def_id,
        'queue': rand_string(),
        'consumer_tag_prefix': rand_string(),
        'service': rand_string(),
        'data_format': rand_string(),
    }
def test_edit(self):

    def callback():
        pass

    def on_max_repeats_reached_cb():
        pass

    start_time = datetime.utcnow()
    test_wait_time = 0.5
    job_interval1, job_interval2 = 2, 3
    job_sleep_time = 10
    job_max_repeats1, job_max_repeats2 = 20, 30

    scheduler = Scheduler(get_scheduler_config(), None)
    scheduler.lock = RLock()
    scheduler.iter_cb = iter_cb
    scheduler.iter_cb_args = (scheduler, datetime.utcnow() + timedelta(seconds=test_wait_time))

    def check(scheduler, job, label):
        self.assertIn(job.name, scheduler.job_greenlets)
        self.assertIn(job, scheduler.jobs)

        self.assertEquals(1, len(scheduler.job_greenlets))
        self.assertEquals(1, len(scheduler.jobs))

        self.assertIs(job.run.im_func, scheduler.job_greenlets.values()[0]._run.im_func)

        clone = list(scheduler.jobs)[0]

        for name in 'name', 'interval', 'cb_kwargs', 'max_repeats', 'is_active':
            expected = getattr(job, name)
            given = getattr(clone, name)
            self.assertEquals(expected, given, '{} != {} ({})'.format(expected, given, name))

        job_cb = job.callback
        clone_cb = clone.callback

        job_on_max_cb = job.on_max_repeats_reached_cb
        clone_on_max_cb = clone.on_max_repeats_reached_cb

        if label == 'first':
            self.assertEquals(job.start_time, clone.start_time)
            self.assertIs(job_cb.im_func, clone_cb.im_func)
            self.assertIs(job_on_max_cb.im_func, clone_on_max_cb.im_func)
        else:
            self.assertEquals(job.start_time, clone.start_time)
            self.assertIs(clone_cb.im_func, scheduler.on_job_executed.im_func)
            self.assertIs(clone_on_max_cb.im_func, scheduler.on_max_repeats_reached.im_func)

    job1 = Job(rand_int(), 'a', SCHEDULER.JOB_TYPE.INTERVAL_BASED, Interval(seconds=job_interval1), start_time,
        max_repeats=job_max_repeats1)
    job1.callback = callback
    job1.on_max_repeats_reached_cb = on_max_repeats_reached_cb
    job1.wait_sleep_time = job_sleep_time

    job2 = Job(rand_int(), 'a', SCHEDULER.JOB_TYPE.INTERVAL_BASED, Interval(seconds=job_interval2), start_time,
        max_repeats=job_max_repeats2)
    job2.callback = callback
    job2.on_max_repeats_reached_cb = on_max_repeats_reached_cb
    job2.wait_sleep_time = job_sleep_time

    scheduler.run()
    scheduler.create(job1)

    sleep(test_wait_time)

    # We have only job1 at this point
    check(scheduler, job1, 'first')

    # Removes job1 along the way ..
    scheduler.edit(job2)

    # .. so job2 has now replaced the deleted job1.
    check(scheduler, job2, 'second')
def get_response_data(self):
    return Bunch({'id': rand_int(), 'name': rand_string()})
def get_request_data(self):
    return {'id': rand_int()}
def test_run(self):
    test_wait_time = 0.3
    sched_sleep_time = 0.1

    data = {'sleep': [], 'jobs': set()}

    def _sleep(value):
        data['sleep'].append(value)

    def spawn_job(job):
        data['jobs'].add(job)

    def job_run(self):
        pass

    job1, job2, job3 = [get_job(str(x)) for x in range(3)]

    # Has already run out of max_repeats and should not be started
    job4 = Job(rand_int(), rand_string(), SCHEDULER.JOB_TYPE.INTERVAL_BASED,
        start_time=parse('1997-12-23 21:24:27'), interval=Interval(seconds=5), max_repeats=3)

    job1.run = job_run
    job2.run = job_run
    job3.run = job_run
    job4.run = job_run

    config = Bunch()
    config.on_job_executed_cb = dummy_callback
    config._add_startup_jobs = False
    config._add_scheduler_jobs = False
    config.startup_jobs = []
    config.odb = None
    config.job_log_level = 'info'

    scheduler = Scheduler(get_scheduler_config(), None)
    scheduler.spawn_job = spawn_job
    scheduler.lock = RLock()
    scheduler.sleep = _sleep
    scheduler.sleep_time = sched_sleep_time
    scheduler.iter_cb = iter_cb
    scheduler.iter_cb_args = (scheduler, datetime.utcnow() + timedelta(seconds=test_wait_time))

    scheduler.create(job1, spawn=False)
    scheduler.create(job2, spawn=False)
    scheduler.create(job3, spawn=False)
    scheduler.create(job4, spawn=False)

    scheduler.run()

    self.assertEquals(3, len(data['jobs']))
    self.assertTrue(scheduler.lock.called)

    for item in data['sleep']:
        self.assertEquals(sched_sleep_time, item)

    for job in job1, job2, job3:
        self.assertIn(job, data['jobs'])

    self.assertNotIn(job4, data['jobs'])
def test_get_reject_acknowledge(self):
    client_id, client_name = rand_int(), rand_string()

    payload, topic, producer, ctx = self._publish_move(
        move=False, add_consumer=True, client_id=client_id, client_name=client_name)

    client = Client(client_id, client_name)
    sub_key = self.api.subscribe(client.id, topic.name)

    # Moves a message to the consumer's queue
    self.api.impl.move_to_target_queues()

    self._check_consumer_queue_before_get(ctx, sub_key)

    # Consumer gets a message which puts it in the in-flight state.
    self._check_get(ctx, sub_key, topic, producer, client)

    # However, there should be nothing in the consumer's queue.
    consumer_msg_ids = self.kvdb.lrange(self.api.impl.CONSUMER_MSG_IDS_PREFIX.format(sub_key), 0, -1)
    self.assertEquals(consumer_msg_ids, [])

    # Consumer rejects the message which puts it back on a queue.
    self.api.reject(sub_key, ctx.msg.msg_id)

    # After rejection it's as though the message has just been published.
    self._check_consumer_queue_before_get(ctx, sub_key)

    # Get after rejection works as before.
    self._check_get(ctx, sub_key, topic, producer, client)

    # Consumer acknowledges a message.
    self.api.acknowledge(sub_key, ctx.msg.msg_id)

    keys = self.kvdb.keys('{}*'.format(self.key_prefix))
    self.assertEquals(len(keys), 3)

    now = datetime.utcnow()

    last_pub_time = parse(self.kvdb.hgetall(self.api.impl.LAST_PUB_TIME_KEY)[topic.name])
    last_seen_consumer = parse(self.kvdb.hgetall(self.api.impl.LAST_SEEN_CONSUMER_KEY)[str(client.id)])
    last_seen_producer = parse(self.kvdb.hgetall(self.api.impl.LAST_SEEN_PRODUCER_KEY)[str(producer.id)])

    self.assertTrue(last_pub_time < now,
        'last_pub_time:`{}` is not less than now:`{}`'.format(last_pub_time, now))
    self.assertTrue(last_seen_consumer < now,
        'last_seen_consumer:`{}` is not less than now:`{}`'.format(last_seen_consumer, now))
    self.assertTrue(last_seen_producer < now,
        'last_seen_producer:`{}` is not less than now:`{}`'.format(last_seen_producer, now))
def get_request_data(self):
    return {'package_id': rand_int()}
def test_handle_simple_ok_exception(self):
    test_instance = self

    for is_ok in True, False:

        expected_response = 'expected_response_{}'.format(rand_string()) if is_ok else None

        class Ping(Service):
            name = 'zato.ping'

            def __init__(self):
                self.response = Bunch(payload=expected_response)
                test_instance.given_response = expected_response

            def accept(self):
                return True

            def update(self, *ignored_args, **ignored_kwargs):
                if not is_ok:
                    raise Exception()

            def set_response_data(self, *ignored_args, **ignored_kwargs):
                return expected_response

            post_handle = validate_output = handle = validate_input = call_hooks = pre_handle = update

        set_up_class_attributes(Ping)

        Ping.server = MagicMock()
        Ping.kvdb = MagicMock()

        callback = rand_string()
        target = 'zato.ping'

        orig_cid, call_cid = new_cid(), new_cid()
        source, req_ts_utc = rand_string(), rand_string()

        ping_impl = 'zato.service.internal.Ping'
        service_store_name_to_impl_name = {'zato.ping': ping_impl, callback: ping_impl}
        service_store_impl_name_to_service = {ping_impl: Ping}

        payload = {
            'source': source,
            'req_ts_utc': req_ts_utc,
            'orig_cid': orig_cid,
            'call_cid': call_cid,
            'callback': callback,
            'callback_context': {rand_string(): rand_string()},
            'target': target,
            'retry_repeats': 4,
            'retry_seconds': 0.1,
            'args': [1, 2, 3],
            'kwargs': {rand_string(): rand_string(), rand_int(): rand_int()},
        }

        instance = self.invoke(InvokeRetry, dumps(payload), None,
            service_store_name_to_impl_name=service_store_name_to_impl_name,
            service_store_impl_name_to_service=service_store_impl_name_to_service)

        gevent.sleep(0.5)

        self.assertEquals(len(instance.broker_client.invoke_async_args), 1)
        self.assertEquals(len(instance.broker_client.invoke_async_args[0]), 1)

        async_msg = Bunch(instance.broker_client.invoke_async_args[0][0])
        self.assertEquals(len(async_msg), 11)

        self.assertEquals(async_msg.action, SERVICE.PUBLISH.value)
        self.assertEquals(async_msg.channel, CHANNEL.INVOKE_ASYNC)
        self.assertEquals(async_msg.data_format, DATA_FORMAT.DICT)
        self.assertEquals(async_msg.transport, None)

        resp_ts_utc = async_msg.payload.pop('resp_ts_utc')

        # Is it a date? If not, an exception will be raised while parsing.
        arrow.get(resp_ts_utc)

        expected_response_msg = {
            'ok': is_ok,
            'source': source,
            'target': target,
            'req_ts_utc': req_ts_utc,
            'orig_cid': orig_cid,
            'call_cid': call_cid,
            'retry_seconds': payload['retry_seconds'],
            'context': payload['callback_context'],
            'retry_repeats': payload['retry_repeats'],
            'response': expected_response,
        }

        self.assertDictEqual(expected_response_msg, async_msg.payload)

        self.assertTrue(len(instance.broker_client.invoke_async_kwargs) == 1)
        self.assertEquals(instance.broker_client.invoke_async_kwargs[0], {'expiration': BROKER.DEFAULT_EXPIRATION})
def test_job_greenlets(self):
    data = {'spawned': [], 'stopped': []}

    class FakeGreenlet(object):
        def __init__(_self, run):
            _self.run = _self._run = run

        def kill(_self, *args, **kwargs):
            data['stopped'].append([_self, args, kwargs])

    def spawn(scheduler_instance, job, *args, **kwargs):
        g = FakeGreenlet(job)
        data['spawned'].append(g)
        return g

    with patch('zato.scheduler.backend.Scheduler._spawn', spawn):
        test_wait_time = 0.5
        job_sleep_time = 10
        job_max_repeats = 30

        job1 = Job(rand_int(), 'a', SCHEDULER.JOB_TYPE.INTERVAL_BASED, Interval(seconds=0.1), max_repeats=job_max_repeats)
        job1.wait_sleep_time = job_sleep_time

        job2 = Job(rand_int(), 'b', SCHEDULER.JOB_TYPE.INTERVAL_BASED, Interval(seconds=0.1), max_repeats=job_max_repeats)
        job2.wait_sleep_time = job_sleep_time

        scheduler = Scheduler(get_scheduler_config(), None)
        scheduler.lock = RLock()
        scheduler.iter_cb = iter_cb
        scheduler.iter_cb_args = (scheduler, datetime.utcnow() + timedelta(seconds=test_wait_time))

        scheduler.create(job1)
        scheduler.create(job2)
        scheduler.run()

        self.assertEquals(scheduler.job_greenlets[job1.name]._run, job1.run)
        self.assertEquals(scheduler.job_greenlets[job2.name]._run, job2.run)

        self.assertTrue(job1.keep_running)
        self.assertTrue(job2.keep_running)

        scheduler.unschedule(job1)

        self.assertFalse(job1.keep_running)
        self.assertTrue(job2.keep_running)

        self.assertNotIn(job1.name, scheduler.job_greenlets)
        self.assertEquals(scheduler.job_greenlets[job2.name]._run, job2.run)

        self.assertEquals(len(data['stopped']), 1)
        g, args, kwargs = data['stopped'][0]

        # That's how we know it was job1 that was deleted, not job2
        self.assertIs(g.run.im_func, job1.run.im_func)
        self.assertIs(args, ())
        self.assertDictEqual(kwargs, {'timeout': 2.0, 'block': False})
def get_response_data(self):
    return Bunch({'id': rand_int(), 'name': self.name})
def setUp(self):
    self.service_class = Create
    self.sio = self.service_class.SimpleIO
    self.id = rand_int()
    self.def_id = rand_int()
    self.name = rand_string()
def get_request_data(self):
    return {
        'cluster_id': rand_int(),
        'name': rand_string(),
        'is_active': rand_bool(),
    }
def get_response_data(self):
    return Bunch({'id': rand_int(), 'name': self.name, 'is_active': rand_bool()})
def get_response_data(self):
    return Bunch({'id': rand_int()})
def get_request_data(self):
    return {
        'id': rand_int(),
        'cluster_id': rand_int(),
        'name': rand_string(),
        'is_active': rand_bool(),
        'host': rand_string(),
        'port': rand_int(),
        'dircache': rand_bool(),
        'user': rand_string(),
        'acct': rand_string(),
        'timeout': rand_int(),
    }
def get_request_data(self):
    return {'system': rand_string(), 'key': rand_string(), 'value': rand_string(), 'id': rand_int()}
def get_request_data(self):
    return {
        'id': rand_int(),
        'name': rand_string(),
        'is_active': rand_bool(),
        'cluster_id': rand_int(),
        'engine': rand_string(),
        'host': rand_string(),
        'port': rand_int(),
        'db_name': rand_string(),
        'username': rand_string(),
        'pool_size': rand_int(),
        'extra': rand_string(),
    }
def get_request_data(self):
    return {'id': rand_int(), 'name': rand_string()}
def test_access_log(self):

    def _utcnow():
        return datetime(year=2014, month=1, day=12, hour=16, minute=22, second=12, tzinfo=UTC)

    local_tz = get_localzone()
    _now = _utcnow()

    local_dt = _now.replace(tzinfo=UTC).astimezone(local_tz)
    local_dt = local_tz.normalize(local_dt)

    request_timestamp = local_dt.strftime(ACCESS_LOG_DT_FORMAT)

    response = rand_string() * rand_int()
    cid = new_cid()
    cluster_id = 1

    channel_name = rand_string()
    url_path = '/{}'.format(rand_string())
    user_agent = rand_string()
    http_version = rand_string()
    request_method = rand_string()
    remote_ip = '10.{}.{}.{}'.format(rand_int(), rand_int(), rand_int())
    req_timestamp_utc = utcnow()

    channel_item = {
        'name': channel_name,
        'is_active': True,
        'transport': 'plain_http',
        'data_format': None,
        'match_target': url_path,
        'method': '',
    }

    wsgi_environ = {
        'gunicorn.socket': FakeGunicornSocket(None, None),
        'wsgi.url_scheme': 'http',
        'wsgi.input': StringIO(response),

        'zato.http.response.status': httplib.OK,
        'zato.channel_item': channel_item,
        'zato.request_timestamp_utc': req_timestamp_utc,

        'HTTP_X_FORWARDED_FOR': remote_ip,
        'PATH_INFO': url_path,
        'REQUEST_METHOD': request_method,
        'SERVER_PROTOCOL': http_version,
        'HTTP_USER_AGENT': user_agent,
    }

    class FakeBrokerClient(object):
        def __init__(self):
            self.msg = None

        def publish(self, msg):
            self.msg = msg

    class FakeODB(ODBManager):
        def __init__(self):
            self.msg = None
            self.cluster = Bunch(id=cluster_id)

    class FakeURLData(URLData):
        def __init__(self):
            self.url_sec = {url_path: Bunch(sec_def=ZATO_NONE, sec_use_rbac=False)}

        def match(self, *ignored_args, **ignored_kwargs):
            return True, channel_item

    class FakeRequestHandler(object):
        def handle(self, *ignored_args, **ignored_kwargs):
            return Bunch(payload=response, content_type='text/plain', headers={}, status_code=httplib.OK)

    class FakeAccessLogger(object):
        def __init__(self):
            self.level = object()
            self.msg = object()
            self.args = object()
            self.exc_info = object()
            self.extra = object()

        def _log(self, level, msg, args, exc_info, extra):
            self.level = level
            self.msg = msg
            self.args = args
            self.exc_info = exc_info
            self.extra = extra

        def isEnabledFor(self, ignored):
            return True

    bc = FakeBrokerClient()

    ws = FakeWorkerStore()
    ws.request_dispatcher = RequestDispatcher()
    ws.request_dispatcher.request_handler = FakeRequestHandler()
    ws.request_dispatcher.url_data = FakeURLData()
    ws.request_dispatcher.url_data.broker_client = bc
    ws.request_dispatcher.url_data.odb = FakeODB()

    ps = ParallelServer()
    ps.worker_store = ws
    ps.request_dispatcher_dispatch = ws.request_dispatcher.dispatch
    ps.access_logger = FakeAccessLogger()
    ps.access_logger_log = ps.access_logger._log

    ps.on_wsgi_request(wsgi_environ, StartResponse(), cid=cid, _utcnow=_utcnow)

    extra = Bunch(ps.access_logger.extra)

    eq_(extra.channel_name, channel_name)
    eq_(extra.user_agent, user_agent)
    eq_(extra.status_code, '200')
    eq_(extra.http_version, http_version)
    eq_(extra.response_size, len(response))
    eq_(extra.path, url_path)
    eq_(extra.cid_resp_time, '{}/0.0'.format(cid)) # It's 0.0 because we mock utcnow to be a constant value
    eq_(extra.method, request_method)
    eq_(extra.remote_ip, remote_ip)
    eq_(extra.req_timestamp_utc, '12/Jan/2014:16:22:12 +0000')
    eq_(extra.req_timestamp, request_timestamp)
def get_response_data(self):
    return Bunch({
        'id': rand_int(),
        'name': rand_string(),
        'is_active': rand_bool(),
        'host': rand_string(),
        'port': rand_int(),
        'user': rand_string(),
        'acct': rand_string(),
        'timeout': rand_int(),
        'dircache': rand_bool(),
    })
def get_request_data(self):
    return {'id': rand_int(), 'current_tech_account_name': rand_string()}
def get_response_data(self):
    return Bunch({
        'id': rand_int(),
        'name': rand_string(),
        'is_active': rand_bool(),
        'cluster_id': rand_int(),
        'engine': rand_string(),
        'host': rand_string(),
        'port': rand_int(),
        'db_name': rand_string(),
        'username': rand_string(),
        'pool_size': rand_int(),
        'extra': rand_string(),
    })
def get_request_data(self):
    return {'id': rand_int(), 'password1': rand_string(), 'password2': rand_string()}
def test_audit(self):
    for expected_audit_enabled in (True, False):
        for expected_status_code in (httplib.OK, httplib.FORBIDDEN):
            for use_x_remote_addr in (True, False):

                expected_auth_ok = True if expected_status_code == httplib.OK else False
                expected_invoke_ok = True if expected_auth_ok is True else False

                expected_cid = new_cid()
                expected_url_scheme = rand_string()
                expected_payload = rand_string()

                expected_audit_repl_patt_type = rand_string()
                expected_replace_patterns_json_pointer = []
                expected_replace_patterns_xpath = []

                expected_cluster_id = rand_int()
                expected_id = rand_int()
                expected_name = rand_string()
                expected_password = '******'
                expected_username = rand_string()

                expected_transport = rand_string()
                expected_connection = rand_string()
                expected_data_format = DATA_FORMAT.JSON
                expected_is_active = True

                expected_request = rand_string()
                expected_audit_max_payload = len(expected_request) - 7 # Subtracting any value would do

                expected_channel_item_key1 = rand_string()
                expected_channel_item_value1 = rand_string()
                expected_match_target = rand_string()

                channel_item = {
                    'id': expected_id,
                    'name': expected_name,
                    'transport': expected_transport,
                    'connection': expected_connection,
                    'audit_enabled': expected_audit_enabled,
                    expected_channel_item_key1: expected_channel_item_value1,
                    'audit_repl_patt_type': expected_audit_repl_patt_type,
                    'replace_patterns_json_pointer': expected_replace_patterns_json_pointer,
                    'replace_patterns_xpath': expected_replace_patterns_xpath,
                    'audit_max_payload': expected_audit_max_payload,
                    'is_active': expected_is_active,
                    'data_format': DATA_FORMAT.JSON,
                    'match_target': expected_match_target,
                    'username': expected_username,
                    'method': '',
                }

                wsgi_environ = {
                    'wsgi.url_scheme': expected_url_scheme,
                    'gunicorn.socket': FakeGunicornSocket(None, None),
                    'zato.http.response.status': expected_status_code,
                    'zato.http.channel_item': channel_item,
                    'PATH_INFO': rand_string(),
                    'wsgi.input': StringIO(expected_request),
                    'REQUEST_METHOD': rand_string(),
                    'SERVER_PROTOCOL': rand_string(),
                    'HTTP_USER_AGENT': rand_string(),
                }

                expected_remote_addr = rand_string()

                if use_x_remote_addr:
                    expected_remote_addr_header = 'HTTP_X_FORWARDED_FOR'
                    wsgi_environ[expected_remote_addr_header] = expected_remote_addr
                else:
                    expected_remote_addr_header = 'REMOTE_ADDR'
                    wsgi_environ[expected_remote_addr_header] = expected_remote_addr

                class FakeSession:
                    def __init__(self, audit=None):
                        self.audit = audit
                        self.commit_called = False

                    def close(self):
                        pass

                    def commit(self):
                        self.commit_called = True

                    def add(self, audit):
                        self.audit = audit

                fake_session = FakeSession()

                class FakeBrokerClient(object):
                    def __init__(self):
                        self.msg = None

                    def publish(self, msg):
                        self.msg = msg

                class FakeODB(ODBManager):
                    def __init__(self):
                        self.msg = None
                        self.cluster = Bunch(id=expected_cluster_id)

                    def session(self):
                        return fake_session

                class FakeURLData(URLData):
                    def __init__(self):
                        self.url_sec = {expected_match_target: Bunch(sec_def=ZATO_NONE)}

                    def match(self, *ignored_args, **ignored_kwargs):
                        return True, channel_item

                class FakeRequestHandler(object):
                    def handle(self, *ignored_args, **ignored_kwargs):
                        return Bunch(payload=expected_payload, content_type='text/plain', headers={},
                            status_code=expected_status_code)

                bc = FakeBrokerClient()

                ws = FakeWorkerStore()
                ws.request_dispatcher = RequestDispatcher()
                ws.request_dispatcher.request_handler = FakeRequestHandler()
                ws.request_dispatcher.url_data = FakeURLData()
                ws.request_dispatcher.url_data.broker_client = bc
                ws.request_dispatcher.url_data.odb = FakeODB()

                ps = ParallelServer()
                ps.worker_store = ws

                ps.on_wsgi_request(wsgi_environ, StartResponse(), cid=expected_cid)

                if expected_audit_enabled:

                    #
                    # Audit 1/2 - Request
                    #

                    # Parsing will confirm the proper value was used
                    datetime.strptime(fake_session.audit.req_time.isoformat(), '%Y-%m-%dT%H:%M:%S.%f')

                    self.assertEquals(fake_session.audit.name, expected_name)
                    self.assertEquals(fake_session.audit.cid, expected_cid)
                    self.assertEquals(fake_session.audit.transport, expected_transport)
                    self.assertEquals(fake_session.audit.connection, expected_connection)
                    self.assertEquals(fake_session.audit.resp_time, None)
                    self.assertEquals(fake_session.audit.user_token, expected_username)
                    self.assertEquals(fake_session.audit.auth_ok, None)
                    self.assertEquals(fake_session.audit.invoke_ok, None)
                    self.assertEquals(fake_session.audit.remote_addr, expected_remote_addr)
                    self.assertEquals(fake_session.audit.req_payload, expected_request[:expected_audit_max_payload])
                    self.assertEquals(fake_session.audit.resp_headers, None)
                    self.assertEquals(fake_session.audit.resp_payload, None)

                    req_headers = literal_eval(fake_session.audit.req_headers)

                    self.assertEquals(req_headers[expected_remote_addr_header], repr(expected_remote_addr))
                    self.assertEquals(req_headers['wsgi.url_scheme'], repr(expected_url_scheme))
                    self.assertEquals(req_headers['gunicorn.socket'], repr(FakeGunicornSocket(None, None)))

                    channel_item = literal_eval(req_headers['zato.http.channel_item'])

                    self.assertEquals(channel_item['audit_max_payload'], expected_audit_max_payload)
                    self.assertEquals(channel_item['name'], expected_name)
                    self.assertEquals(channel_item['username'], expected_username)
                    self.assertEquals(channel_item[expected_channel_item_key1], expected_channel_item_value1)
                    self.assertEquals(channel_item['audit_repl_patt_type'], expected_audit_repl_patt_type)
                    self.assertEquals(channel_item['replace_patterns_json_pointer'], expected_replace_patterns_json_pointer)
                    self.assertEquals(channel_item['is_active'], expected_is_active)
                    self.assertEquals(channel_item['data_format'], expected_data_format)
                    self.assertEquals(channel_item['audit_enabled'], expected_audit_enabled)
                    self.assertEquals(channel_item['password'], expected_password)
                    self.assertEquals(channel_item['transport'], expected_transport)
                    self.assertEquals(channel_item['match_target'], expected_match_target)

                    #
                    # Audit 2/2 - Response
                    #

                    self.assertEquals(bc.msg['action'], CHANNEL_BROKER_MESSAGE.HTTP_SOAP_AUDIT_RESPONSE.value)
                    self.assertEquals(bc.msg['cid'], expected_cid)
                    self.assertEquals(bc.msg['data_format'], DATA_FORMAT.JSON)
                    self.assertEquals(bc.msg['service'], 'zato.http-soap.set-audit-response-data')

                    payload = loads(bc.msg['payload'])

                    self.assertEquals(payload['auth_ok'], expected_auth_ok)
                    self.assertEquals(payload['invoke_ok'], expected_invoke_ok)
                    self.assertEquals(payload['resp_payload'], expected_payload)

                    # Parsing alone will check its format is valid
                    datetime.strptime(payload['resp_time'], '%Y-%m-%dT%H:%M:%S.%f')

                    wsgi_environ = loads(payload['resp_headers'])

                    self.assertEquals(wsgi_environ['wsgi.url_scheme'], repr(expected_url_scheme))
                    self.assertEquals(wsgi_environ['gunicorn.socket'], repr(FakeGunicornSocket(None, None)))
                    self.assertEquals(wsgi_environ['zato.http.response.status'],
                        "'{} {}'".format(expected_status_code, httplib.responses[expected_status_code]))

                    channel_item = literal_eval(wsgi_environ['zato.http.channel_item'])

                    self.assertEquals(channel_item[expected_channel_item_key1], expected_channel_item_value1)
                    self.assertEquals(channel_item['audit_enabled'], expected_audit_enabled)
                    self.assertEquals(channel_item['audit_repl_patt_type'], expected_audit_repl_patt_type)
                    self.assertEquals(channel_item['replace_patterns_json_pointer'], expected_replace_patterns_json_pointer)
                    self.assertEquals(channel_item['replace_patterns_xpath'], expected_replace_patterns_xpath)
                    self.assertEquals(channel_item['name'], expected_name)
                    self.assertEquals(channel_item['id'], expected_id)
                    self.assertEquals(channel_item['password'], expected_password)
                    self.assertEquals(channel_item['data_format'], expected_data_format)
                    self.assertEquals(channel_item['transport'], expected_transport)
                    self.assertEquals(channel_item['connection'], expected_connection)
                    self.assertEquals(channel_item['audit_max_payload'], expected_audit_max_payload)
                    self.assertEquals(channel_item['is_active'], expected_is_active)

                else:
                    # Audit not enabled so no response audit message was published on the broker
                    self.assertTrue(bc.msg is None)
def test_interval_has_in_seconds(self):
    in_seconds = rand_int()
    interval = Interval(in_seconds=in_seconds)
    self.assertEquals(interval.in_seconds, in_seconds)
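# A hedged sketch of the Interval behaviour this test pins down: an explicit
# in_seconds is stored as given. Deriving in_seconds from
# days/hours/minutes/seconds when it is not given is an assumption about the
# real class, hence the hypothetical name IntervalSketch.
class IntervalSketch(object):
    def __init__(self, days=0, hours=0, minutes=0, seconds=0, in_seconds=0):
        self.days = days
        self.hours = hours
        self.minutes = minutes
        self.seconds = seconds
        self.in_seconds = in_seconds or (((days * 24 + hours) * 60 + minutes) * 60 + seconds)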
def test_main_loop_sleep_spawn_called(self):
    wait_time = 0.2
    sleep_time = rand_int()

    now_values = [parse('2019-12-23 22:19:03'), parse('2021-05-13 17:35:48')]

    sleep_history = []
    spawn_history = []
    sleep_time_history = []

    def sleep(value):
        if value != wait_time:
            sleep_history.append(value)

    def spawn(*args, **kwargs):
        spawn_history.append([args, kwargs])

    def get_sleep_time(*args, **kwargs):
        sleep_time_history.append(args[1])
        return sleep_time

    with patch('gevent.sleep', sleep):
        with patch('zato.scheduler.backend.Job._spawn', spawn):
            with patch('zato.scheduler.backend.Job.get_sleep_time', get_sleep_time):

                for now in now_values:
                    self.now = now

                    with patch('zato.scheduler.backend.datetime', self._datetime):

                        for job_type in SCHEDULER.JOB_TYPE.CRON_STYLE, SCHEDULER.JOB_TYPE.INTERVAL_BASED:

                            max_repeats = choice(range(2, 5))
                            cb_kwargs = {rand_string(): rand_string(), rand_string(): rand_string()}

                            interval = Interval(seconds=sleep_time) if job_type == SCHEDULER.JOB_TYPE.INTERVAL_BASED \
                                else CronTab(DEFAULT_CRON_DEFINITION)

                            job = Job(rand_int(), rand_string(), job_type, interval, max_repeats=max_repeats)
                            if job.type == SCHEDULER.JOB_TYPE.CRON_STYLE:
                                job.cron_definition = DEFAULT_CRON_DEFINITION

                            job.cb_kwargs = cb_kwargs
                            job.start_time = datetime.utcnow()
                            job.callback = dummy_callback

                            self.assertTrue(job.main_loop())
                            time.sleep(0.5)

                            self.assertEquals(max_repeats, len(sleep_history))
                            self.assertEquals(max_repeats, len(spawn_history))

                            for item in sleep_history:
                                self.assertEquals(sleep_time, item)

                            for idx, (callback, ctx_dict) in enumerate(spawn_history, 1):
                                self.assertEquals(2, len(callback))
                                callback = callback[1]

                                self.check_ctx(ctx_dict['ctx'], job, sleep_time, max_repeats, idx, cb_kwargs,
                                    len(spawn_history), job_type)
                                self.assertIs(callback, dummy_callback)

                            del sleep_history[:]
                            del spawn_history[:]

                    for item in sleep_time_history:
                        self.assertEquals(item, now)

                    del sleep_time_history[:]
def _get_config(self):
    return {
        'is_active': True,
        'sec_type': rand_string(),
        'address_host': rand_string(),
        'address_url_path': rand_string(),
        'ping_method': rand_string(),
        'soap_version': '1.1',
        'pool_size': rand_int(),
        'serialization_type': 'string',
        'timeout': rand_int(),
        'tls_verify': ZATO_NONE,
        'data_format': '',
        'content_type': '',
    }