def test_redis_pool_and_reqid_cleanup(self, docker_client, redis):
    reqids = []

    # start a flock, then kill its containers out-of-band
    res = self.client.post('/api/flock/request/test_vol', json={'user_params': {'foo': 'bar'}})
    reqid = res.json['reqid']

    res = self.client.post('/api/flock/start/{0}'.format(reqid))

    reqids.append(reqid)

    try:
        docker_client.containers.get(res.json['containers']['box-1']['id']).remove(force=True)
    except Exception:
        pass

    try:
        docker_client.containers.get(res.json['containers']['box-2']['id']).remove(force=True)
    except Exception:
        pass

    def assert_removed():
        # all request and pool state should be cleaned up from redis
        for reqid in reqids:
            assert not redis.exists('req:' + reqid)

        assert len(redis.smembers('p:test-pool:f')) == 0

        assert redis.keys('*') == []

    sleep_try(1.0, 10.0, assert_removed)
def test_expire_unused(self, redis):
    def assert_done():
        assert redis.scard('p:fixed-pool:f') == 3

    sleep_try(0.2, 6.0, assert_done)

    assert redis.hget('p:fixed-pool:i', 'max_size') == '3'

    res = self.start(self.pending[3])
    assert res['queue'] == 3

    res, reqid = self.queue_req()
    assert res['queue'] == 7
    assert reqid == self.pending[7]

    # simulate expiry by deleting some queued reqids
    self.delete_reqid(redis, self.pending[0])
    self.delete_reqid(redis, self.pending[1])
    self.delete_reqid(redis, self.pending[2])
    self.delete_reqid(redis, self.pending[4])

    # expired reqids are removed, so remaining requests move up in the queue
    res = self.start(self.pending[3])
    assert res['queue'] == 0

    res = self.start(self.pending[5])
    assert res['queue'] == 1

    res = self.start(self.pending[7])
    assert res['queue'] == 3

    # a previously expired reqid gets a new queue position at the end
    res = self.start(self.pending[0])
    assert res['queue'] == 4
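# The delete_reqid() helper used above is defined elsewhere in the suite. A minimal
# sketch, assuming request data lives under the 'req:<reqid>' key seen in
# test_redis_pool_and_reqid_cleanup; the real helper may clean up additional keys.
def delete_reqid(self, redis, reqid):
    # drop the stored request so the pool treats this reqid as expired
    redis.delete('req:' + reqid)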
def test_check_untracked_cleanup(self, docker_client, redis, shepherd):
    num_containers = self._count_containers(docker_client, shepherd)
    num_volumes = self._count_volumes(docker_client, shepherd)
    num_networks = self._count_networks(docker_client, shepherd)

    for x in range(0, 3):
        res = self.client.post('/api/flock/request/test_vol')
        reqid = res.json['reqid']

        res = self.client.post('/api/flock/start/{0}'.format(reqid))
        assert res.json['containers']

    assert num_containers < self._count_containers(docker_client, shepherd)
    assert num_volumes < self._count_volumes(docker_client, shepherd)
    assert num_networks < self._count_networks(docker_client, shepherd)

    # wipe all redis data, leaving the docker objects untracked
    redis.flushdb()

    def assert_removed():
        assert num_containers == self._count_containers(docker_client, shepherd)
        assert num_volumes == self._count_volumes(docker_client, shepherd)
        #assert num_networks == self._count_networks(docker_client, shepherd)

    sleep_try(2.0, 20.0, assert_removed)
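# The _count_containers / _count_volumes / _count_networks helpers used above are not
# shown in this section. A minimal sketch, assuming each Docker object created by the
# shepherd carries its reqid label (pool.shepherd.reqid_label elsewhere in these tests);
# the exact filter key is an assumption for illustration only.
def _count_containers(self, docker_client, shepherd):
    # count only containers labeled by this shepherd instance
    filters = {'label': shepherd.reqid_label}
    return len(docker_client.containers.list(all=True, filters=filters))

def _count_volumes(self, docker_client, shepherd):
    filters = {'label': shepherd.reqid_label}
    return len(docker_client.volumes.list(filters=filters))

def _count_networks(self, docker_client, shepherd):
    filters = {'label': shepherd.reqid_label}
    return len(docker_client.networks.list(filters=filters))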
def test_stop_one_run_next(self, redis, persist_pool):
    reqid = redis.srandmember('p:{0}:f'.format(persist_pool.name))

    num_started = len(persist_pool.start_events)
    num_stopped = len(persist_pool.stop_events)

    self.stop(reqid)

    def assert_done():
        assert len(persist_pool.stop_events) >= num_stopped + 2
        assert len(persist_pool.start_events) >= num_started + 2

    sleep_try(0.2, 5.0, assert_done)
def test_flock_start(self, redis):
    res = self.client.post('/api/flock/request/test_b')
    reqid = res.json['reqid']

    res = self.client.post('/api/flock/start/' + reqid)
    assert res.json['network']
    assert res.json['containers']['box']

    TestTimedPoolShutdownContainer.container = res.json['containers']['box']
    TestTimedPoolShutdownContainer.reqid = reqid

    def assert_done():
        assert redis.scard('p:test-pool:f') == 1
        assert redis.ttl('p:test-pool:rq:' + reqid) == 1.0

    sleep_try(0.2, 6.0, assert_done)
def test_remove_all(self, redis, persist_pool):
    while len(self.reqids) > 0:
        remove = self.reqids.pop()
        self.stop(remove)
        #time.sleep(0.2)

    def assert_done():
        assert redis.scard('p:{0}:f'.format(persist_pool.name)) == 0
        assert redis.llen('p:{0}:wq'.format(persist_pool.name)) == 0
        assert redis.scard('p:{0}:ws'.format(persist_pool.name)) == 0
        assert redis.scard('p:{0}:a'.format(persist_pool.name)) == 0

        assert persist_pool.reqid_starts == persist_pool.reqid_stops

    sleep_try(0.2, 30.0, assert_done)
def test_flock_stop(self, pool, redis):
    res = self.client.post('/api/flock/stop/' + self.reqid)
    assert res.json['success'] == True

    def assert_done():
        assert len(pool.stop_events) == 2

    sleep_try(0.2, 6.0, assert_done)

    for event in pool.stop_events:
        assert event['Action'] == 'die'
        assert event['Actor']['Attributes'][pool.shepherd.reqid_label] == self.reqid

    assert not redis.exists('p:test-pool:rq:' + self.reqid)
    assert redis.scard('p:test-pool:f') == 0
def test_flock_kill_container(self, redis, app, docker_client):
    assert redis.exists('p:test-pool:rq:' + self.reqid)

    try:
        docker_client.containers.get(self.container['id']).kill()
    except Exception:
        pass

    pool = app.pools['test-pool']

    def assert_done():
        assert not redis.exists('p:test-pool:rq:' + self.reqid)
        assert redis.scard('p:test-pool:f') == 0

        assert len(pool.stop_events) == 2

    sleep_try(0.2, 6.0, assert_done)
def test_full_continue_running(self, redis, persist_pool):
    for x in range(1, 4):
        res, reqid = self.do_req_and_start(persist_pool)
        assert res['containers']['box']
        assert redis.scard('p:{0}:f'.format(persist_pool.name)) == x

        # a duplicate start request returns the same response
        new_res = self.client.post('/api/flock/start/' + reqid)
        assert res == new_res.json

    def assert_done():
        assert len(persist_pool.start_events) == 6
        assert len(persist_pool.stop_events) == 0

        assert redis.llen('p:{0}:wq'.format(persist_pool.name)) == 0
        assert redis.scard('p:{0}:f'.format(persist_pool.name)) == 3

    sleep_try(0.2, 5.0, assert_done)
def test_ensure_flock_stop(self, docker_client):
    res = self.client.post('/api/flock/request/test_b')
    reqid = res.json['reqid']

    res = self.client.post('/api/flock/start/{0}'.format(reqid))
    assert res.json['containers']

    box = docker_client.containers.get(res.json['containers']['box']['id'])
    box_2 = docker_client.containers.get(res.json['containers']['box-2']['id'])

    # removing one container should cause the rest of the flock to be stopped
    box.remove(force=True)

    def assert_removed():
        with pytest.raises(docker.errors.NotFound):
            docker_client.containers.get(res.json['containers']['box-2']['id'])

    sleep_try(0.3, 10.0, assert_removed)
def test_expire_queue_next_in_order(self, redis, docker_client):
    self.remove_next(docker_client)

    def assert_done():
        assert redis.scard('p:fixed-pool:f') == 2

    sleep_try(0.2, 6.0, assert_done)

    res = self.client.post('/api/flock/start/' + self.pending[1])
    assert res.json['queue'] == 1

    res = self.client.post('/api/flock/start/' + self.pending[0])
    assert res.json['containers']['box']
    self.ids.append(res.json['containers']['box']['id'])

    res = self.client.post('/api/flock/start/' + self.pending[1])
    assert res.json['queue'] == 0

    self.pending.pop(0)
def test_dont_reque_on_clean_exit(self, redis, persist_pool, docker_client):
    # on a clean exit (exit code 0), the flock should not be requeued for restart
    res, reqid = self.do_req_and_start(persist_pool,
                                       overrides={'box': 'test-shepherd/exit0'})
    assert res['containers']['box']
    assert redis.scard('p:{0}:f'.format(persist_pool.name)) == 1

    new_res = self.client.post('/api/flock/start/' + reqid)

    def assert_done():
        # not running
        assert redis.scard('p:{0}:f'.format(persist_pool.name)) == 0

        # not queued for restart
        assert redis.scard('p:{0}:ws'.format(persist_pool.name)) == 0

        assert len(persist_pool.start_events) == 2
        assert len(persist_pool.stop_events) == 2

        assert persist_pool.reqid_starts[reqid] == 2
        assert persist_pool.reqid_stops[reqid] == 2

    sleep_try(0.2, 20.0, assert_done)

    containers = res['containers']

    for container in containers.values():
        assert docker_client.containers.get(container['id']).status == 'exited'

    rem_res = self.client.post('/api/flock/remove/' + reqid)
    assert rem_res.json.get('success')

    for container in containers.values():
        with pytest.raises(docker.errors.NotFound):
            docker_client.containers.get(container['id'])

    persist_pool.start_events.clear()
    persist_pool.stop_events.clear()
    persist_pool.reqid_starts.clear()
    persist_pool.reqid_stops.clear()
def test_expire_queue_next_out_of_order(self, redis, docker_client):
    res = self.start(self.pending[0])
    assert res['queue'] == 0

    res = self.start(self.pending[1])
    assert res['queue'] == 1

    res = self.start(self.pending[2])
    assert res['queue'] == 2

    self.remove_next(docker_client)
    self.remove_next(docker_client)

    def assert_done():
        assert redis.scard('p:fixed-pool:f') == 1

    sleep_try(0.2, 6.0, assert_done)

    res = self.start(self.pending[2])
    assert res['queue'] == 2

    res = self.start(self.pending[1])
    assert res['containers']

    res = self.start(self.pending[2])
    assert res['queue'] == 1

    res = self.start(self.pending[3])
    assert res['queue'] == 2

    res = self.start(self.pending[0])
    assert res['containers']

    res = self.start(self.pending[2])
    assert res['queue'] == 0

    res = self.start(self.pending[3])
    assert res['queue'] == 1

    self.pending.pop(0)
    self.pending.pop(0)
def test_flock_start(self, pool, redis):
    res = self.client.post('/api/flock/start/' + self.reqid,
                           json={'environ': {'NEW': 'VALUE'}})

    assert res.json['containers']['box']
    assert res.json['containers']['box']['environ']['NEW'] == 'VALUE'
    assert res.json['network']

    def assert_done():
        assert len(pool.start_events) == 2

    sleep_try(0.2, 6.0, assert_done)

    for event in pool.start_events:
        assert event['Action'] == 'start'
        assert event['Actor']['Attributes'][pool.shepherd.reqid_label] == self.reqid

    assert redis.exists('p:test-pool:rq:' + self.reqid)
    assert redis.scard('p:test-pool:f') == 1
def test_full_queue_additional(self, redis, persist_pool):
    assert len(persist_pool.start_events) == 6

    for x in range(1, 4):
        res, reqid = self.do_req_and_start(persist_pool)
        assert res['queue'] == x - 1

        assert redis.scard('p:{0}:f'.format(persist_pool.name)) == 3
        assert redis.llen('p:{0}:wq'.format(persist_pool.name)) == x
        assert redis.scard('p:{0}:ws'.format(persist_pool.name)) == x

        # ensure a double start doesn't move the queue position
        res = self.client.post('/api/flock/start/' + reqid)
        assert res.json['queue'] == x - 1

    # while flocks rotate, the waiting queue should stay between 2 and 3 entries
    for x in range(1, 10):
        time.sleep(2.1)
        llen = redis.llen('p:{0}:wq'.format(persist_pool.name))
        scard = redis.scard('p:{0}:ws'.format(persist_pool.name))
        assert llen in (2, 3)
        assert scard in (2, 3)

    def assert_done():
        assert len(persist_pool.reqid_starts) >= 6
        assert len(persist_pool.reqid_stops) >= 6
        assert all(value >= 2 for value in persist_pool.reqid_starts.values())
        assert all(value >= 2 for value in persist_pool.reqid_stops.values())

        assert len(persist_pool.start_events) >= 14
        assert len(persist_pool.stop_events) >= 10

    sleep_try(0.2, 20.0, assert_done)
def test_flock_wait_expire(self, redis):
    def assert_done():
        assert not redis.exists('p:test-pool:rq:' + self.reqid)
        assert redis.scard('p:test-pool:f') == 0

    sleep_try(0.2, 6.0, assert_done)
def test_flock_still_running(self, redis):
    def assert_done():
        assert redis.exists('p:test-pool:rq:' + self.reqid)
        assert redis.scard('p:test-pool:f') == 1

    sleep_try(0.2, 6.0, assert_done)
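# Every test above polls its assertions via sleep_try(interval, max_time, func).
# A minimal sketch of such a polling helper is shown below for reference; the real
# implementation lives elsewhere in the test utilities, so the exact behavior here
# (retrying only on AssertionError) is an assumption.
def sleep_try(sleep_interval, max_time, test_func):
    import time

    start = time.time()
    while True:
        try:
            # succeed as soon as all assertions in test_func pass
            test_func()
            return
        except AssertionError:
            # re-raise the last failure once the time budget is exhausted
            if time.time() - start >= max_time:
                raise
            time.sleep(sleep_interval)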