def rm_containers():
    """Batch-remove containers, grouped by (version, host).

    One TASK_REMOVE task is created per group and an async removal job is
    fired for each.  The image itself is deleted only when the group covers
    every container of that version still on the host.
    """
    requested_cids = request.get_json()['cids']
    if not all(len(short_id) >= 7 for short_id in requested_cids):
        abort(400, 'must given at least 7 chars for container_id')

    # Group existing containers by (version, host); unknown ids are skipped.
    grouped = {}
    for short_id in requested_cids:
        container = Container.get_by_container_id(short_id)
        if not container:
            continue
        grouped.setdefault((container.version, container.host), []).append(container)

    ts, watch_keys = [], []
    for (version, host), containers in grouped.iteritems():
        cids = [c.id for c in containers]
        task = Task.create(consts.TASK_REMOVE, version, host, {'container_ids': cids})
        # The image may be dropped only when no other container of this
        # version would remain on the host after removal.
        all_host_cids = [c.id for c in Container.get_multi_by_host(host)
                         if c and c.version_id == version.id]
        need_to_delete_image = set(cids) == set(all_host_cids)
        remove_containers.apply_async(
            args=(task.id, cids, need_to_delete_image),
            task_id='task:%d' % task.id,
        )
        ts.append(task.id)
        watch_keys.append(task.result_key)
    return {'r': 0, 'msg': 'ok', 'tasks': ts, 'watch_keys': watch_keys}
def start_container(cid):
    """Mark the container record healthy, start it on its host, and
    rebind its IP addresses.  Unknown ids are a silent no-op."""
    container = Container.get_by_container_id(cid)
    if container:
        container.cure()
        dockerjob.start_containers([container], container.host)
        rebind_container_ip(container)
    return {'r': 0, 'msg': code.OK}
def stop_container(cid):
    """Mark the container record dead and stop the docker container on
    its host.  Unknown ids are a silent no-op."""
    container = Container.get_by_container_id(cid)
    if container:
        container.kill()
        dockerjob.stop_containers([container], container.host)
    current_app.logger.info('Stop container (container_id=%s)', cid[:7])
    return {'r': 0, 'msg': consts.OK}
def cure_container(cid):
    """Rebind a dead container's IPs and mark its record healthy again.
    Containers that are already alive (or unknown) are left untouched."""
    container = Container.get_by_container_id(cid)
    if container and not container.is_alive:
        rebind_container_ip(container)
        container.cure()
    current_app.logger.info('Cure container (container_id=%s)', cid[:7])
    return {'r': 0, 'msg': consts.OK}
def container_log(cid):
    """Stream a container's docker logs over the request's websocket.

    Query args:
        stderr, stdout: 1 to include that stream (default 0 = excluded).
        tail: number of trailing lines to start from; 0 means 'all'
              (docker client's argument).
    """
    stderr = request.args.get('stderr', type=int, default=0)
    stdout = request.args.get('stdout', type=int, default=0)
    tail = request.args.get('tail', type=int, default=10)
    # docker client's argument
    if tail == 0:
        tail = 'all'

    ws = request.environ['wsgi.websocket']
    container = Container.get_by_container_id(cid)
    if not container:
        ws.close()
        # lazy %-args: the message is only formatted if the level is enabled
        _log.info('Container %s not found, close websocket', cid)
        return 'websocket closed'

    try:
        client = get_docker_client(container.host.addr)
        for line in client.logs(cid, stream=True, stderr=bool(stderr),
                                stdout=bool(stdout), tail=tail):
            ws.send(line)
    # `except X as e` works on Python 2.6+ and 3.x; `except X, e` is 2-only
    except geventwebsocket.WebSocketError as e:
        _log.exception(e)
def rm_containers():
    """Batch-remove containers, grouped by (version, host).

    Creates one TASK_REMOVE task per group and fires an async removal job
    for each; the image is also deleted when the group covers every
    container of that version on the host.
    """
    requested_cids = request.get_json()['cids']
    if not all(len(short_id) >= 7 for short_id in requested_cids):
        abort(400, 'must given at least 7 chars for container_id')

    # Group existing containers by (version, host); unknown ids are skipped.
    grouped = {}
    for short_id in requested_cids:
        container = Container.get_by_container_id(short_id)
        if not container:
            continue
        grouped.setdefault((container.version, container.host), []).append(container)

    task_ids, watch_keys = [], []
    for (version, host), containers in grouped.iteritems():
        cids = [c.id for c in containers]
        task = Task.create(TASK_REMOVE, version, host, {'container_ids': cids})
        # Safe to drop the image only if nothing of this version remains.
        all_host_cids = [c.id for c in Container.get_multi_by_host(host)
                         if c and c.version_id == version.id]
        need_to_delete_image = set(cids) == set(all_host_cids)
        remove_containers.apply_async(
            args=(task.id, cids, need_to_delete_image),
            task_id='task:%d' % task.id,
        )
        task_ids.append(task.id)
        watch_keys.append(task.result_key)
    return {'tasks': task_ids, 'watch_keys': watch_keys}
def start_container(cid):
    """Cure, start and re-IP a container that is currently not alive.
    Already-alive or unknown containers are left untouched."""
    container = Container.get_by_container_id(cid)
    if container and not container.is_alive:
        container.cure()
        dockerjob.start_containers([container], container.host)
        rebind_container_ip(container)
    current_app.logger.info('Start container (container_id=%s)', cid[:7])
    return {'r': 0, 'msg': consts.OK}
def _clean_failed_containers(cid):
    # Clean up a failed container: remove it from its host so its cores
    # and IPs are released, then drop the database record.
    _log.info('Cleaning failed container (cid=%s)', cid)
    container = Container.get_by_container_id(cid)
    if not container:
        return
    dockerjob.remove_container_by_cid([cid], container.host)
    container.delete()
def kill_container(cid):
    """Mark the container dead; if the agent left a die-reason in redis,
    consume it and flag the container as OOM-killed."""
    container = Container.get_by_container_id(cid)
    if container:
        container.kill()
        key = consts.ERU_AGENT_DIE_REASON % container.container_id
        reason = rds.get(key)
        rds.delete(key)
        # presence of the key means the agent reported a death reason —
        # recorded here as an OOM flag on the container
        if reason is not None:
            container.set_props({'oom': 1})
    current_app.logger.info('Kill container (container_id=%s)', cid[:7])
    return {'r': 0, 'msg': consts.OK}
def rm_containers():
    """Batch-remove containers, grouped by (version, host), one async
    TASK_REMOVE task per group.  Images are never deleted here."""
    requested_cids = request.get_json()["cids"]
    grouped = {}
    ts, watch_keys = [], []

    # Group existing containers by (version, host); unknown ids are skipped.
    for short_id in requested_cids:
        container = Container.get_by_container_id(short_id)
        if not container:
            continue
        grouped.setdefault((container.version, container.host), []).append(container)

    for (version, host), containers in grouped.iteritems():
        cids = [c.id for c in containers]
        task_props = {"container_ids": cids}
        task = Task.create(consts.TASK_REMOVE, version, host, task_props)
        remove_containers.apply_async(args=(task.id, cids, False),
                                      task_id="task:%d" % task.id)
        ts.append(task.id)
        watch_keys.append(task.result_key)
    return {"r": 0, "msg": "ok", "tasks": ts, "watch_keys": watch_keys}
def migrate_container(container_id, need_to_remove=True):
    """Move a container onto another host in its pod.

    Asks the scheduler for one host with enough cores, builds a
    TASK_MIGRATE task carrying the old container's entrypoint/env/IPs,
    then (optionally) removes the old container and creates the new one.

    :param container_id: short/full docker container id to migrate.
    :param need_to_remove: when True, remove the old container first.
    """
    container = Container.get_by_container_id(container_id)
    if not container:
        _log.error('container %s is not found, ignore migration', container_id)
        return

    # Translate the container's core count into (full cores, share slices).
    ncore, nshare = container.host.pod.get_core_allocation(container.ncore)
    # Ask the scheduler for a single host in the pod with that much room.
    host_cores = average_schedule(container.host.pod, 1, ncore, nshare, None)
    if not host_cores:
        _log.error('not enough cores to migrate')
        return

    cids = [container.id]
    # Reuse the container's current IPs on the new host.
    spec_ips = cidrs = container.get_ips()
    # Only one host was requested, so take the first scheduling entry.
    (host, container_count), cores = next(host_cores.iteritems())
    props = {
        'ncontainer': 1,
        'entrypoint': container.entrypoint,
        'env': container.env,
        'full_cores': [c.label for c in cores.get('full', [])],
        'part_cores': [c.label for c in cores.get('part', [])],
        'ports': None,
        'args': None,
        'nshare': nshare,
        'networks': cidrs,
        'image': None,
        'route': '',
        'callback_url': container.callback_url,
        'container_ids': cids,
    }
    task = Task.create(consts.TASK_MIGRATE, container.version, host, props)
    if not task:
        _log.error('create migrate task error')
        return

    _log.info('start migration...')
    # NOTE: .apply() executes the celery tasks synchronously, so the
    # removal finishes before the creation starts.
    if need_to_remove:
        remove_containers.apply(args=(task.id, cids, False), task_id='task:%s' % task.id)
    create_containers.apply(args=(task.id, 1, nshare, cores, cidrs, spec_ips), task_id='task:%s' % task.id)
    _log.info('migration done')
def rm_containers():
    """Batch-remove containers, grouped by (version, host), one async
    TASK_REMOVE task per group.  Images are never deleted here."""
    requested_cids = request.get_json()['cids']
    grouped = {}
    ts, watch_keys = [], []

    # Group existing containers by (version, host); unknown ids are skipped.
    for short_id in requested_cids:
        container = Container.get_by_container_id(short_id)
        if not container:
            continue
        grouped.setdefault((container.version, container.host), []).append(container)

    for (version, host), containers in grouped.iteritems():
        cids = [c.id for c in containers]
        task_props = {'container_ids': cids}
        task = Task.create(consts.TASK_REMOVE, version, host, task_props)
        remove_containers.apply_async(
            args=(task.id, cids, False),
            task_id='task:%d' % task.id,
        )
        ts.append(task.id)
        watch_keys.append(task.result_key)
    return {'r': 0, 'msg': 'ok', 'tasks': ts, 'watch_keys': watch_keys}
def migrate_container(container_id, need_to_remove=True):
    """Move a container onto another host in its pod.

    Asks the scheduler for one host with enough cores, builds a
    TASK_MIGRATE task carrying the old container's entrypoint/env/IPs,
    then (optionally) removes the old container and creates the new one.

    :param container_id: short/full docker container id to migrate.
    :param need_to_remove: when True, remove the old container first.
    """
    container = Container.get_by_container_id(container_id)
    if not container:
        _log.error('container %s is not found, ignore migration', container_id)
        return

    # Translate the container's core count into (full cores, share slices).
    ncore, nshare = container.host.pod.get_core_allocation(container.ncore)
    # Ask the scheduler for a single host in the pod with that much room.
    host_cores = average_schedule(container.host.pod, 1, ncore, nshare, None)
    if not host_cores:
        _log.error('not enough cores to migrate')
        return

    cids = [container.id]
    # Reuse the container's current IPs on the new host.
    spec_ips = cidrs = container.get_ips()
    # Only one host was requested, so take the first scheduling entry.
    (host, container_count), cores = next(host_cores.iteritems())
    props = {
        'ncontainer': 1,
        'entrypoint': container.entrypoint,
        'env': container.env,
        'full_cores': [c.label for c in cores.get('full', [])],
        'part_cores': [c.label for c in cores.get('part', [])],
        'ports': None,
        'args': None,
        'nshare': nshare,
        'networks': cidrs,
        'image': None,
        'route': '',
        'callback_url': container.callback_url,
        'container_ids': cids,
    }
    task = Task.create(consts.TASK_MIGRATE, container.version, host, props)
    if not task:
        _log.error('create migrate task error')
        return

    _log.info('start migration...')
    # NOTE: .apply() executes the celery tasks synchronously, so the
    # removal finishes before the creation starts.
    if need_to_remove:
        remove_containers.apply(args=(task.id, cids, False), task_id='task:%s' % task.id)
    create_containers.apply(args=(task.id, 1, nshare, cores, cidrs, spec_ips), task_id='task:%s' % task.id)
    _log.info('migration done')
def container_log(cid):
    """Stream a container's docker logs over the request's websocket.

    Query args:
        stderr, stdout: 1 to include that stream (default 0 = excluded).
        tail: number of trailing lines to start from; 0 means 'all'
              (docker client's argument).
    """
    stderr = request.args.get('stderr', type=int, default=0)
    stdout = request.args.get('stdout', type=int, default=0)
    tail = request.args.get('tail', type=int, default=10)
    # docker client's argument
    if tail == 0:
        tail = 'all'

    ws = request.environ['wsgi.websocket']
    container = Container.get_by_container_id(cid)
    if not container:
        ws.close()
        # lazy %-args: the message is only formatted if the level is enabled
        logger.info('Container %s not found, close websocket', cid)
        return 'websocket closed'

    try:
        client = get_docker_client(container.host.addr)
        for line in client.logs(cid, stream=True, stderr=bool(stderr),
                                stdout=bool(stdout), tail=tail):
            ws.send(line)
    # `except X as e` works on Python 2.6+ and 3.x; `except X, e` is 2-only
    except geventwebsocket.WebSocketError as e:
        logger.exception(e)
def bind_network(cid):
    """Attach additional networks to a live container of the given app,
    acquiring one fresh IP per network and binding them to the container."""
    data = request.get_json()
    appname = data.get('appname')
    container = Container.get_by_container_id(cid)
    if not (container and container.is_alive):
        raise EruAbortException(consts.HTTP_NOT_FOUND, 'Container %s not found' % cid)
    if container.appname != appname:
        raise EruAbortException(consts.HTTP_NOT_FOUND, 'Container does not belong to app')

    network_names = data.get('networks', [])
    networks = [n for n in (Network.get_by_name(name) for name in network_names) if n]
    if not networks:
        raise EruAbortException(consts.HTTP_BAD_REQUEST, 'network empty')

    ips = [ip for ip in (network.acquire_ip() for network in networks) if ip]
    if not ips:
        raise EruAbortException(consts.HTTP_BAD_REQUEST, 'no ip available')

    # next unused network index for this container (-1 base => 0 when empty)
    nid = max([ip.network_id for ip in container.ips.all()] + [-1]) + 1
    bind_container_ip(container, ips, nid=nid)
    for ip in ips:
        ip.assigned_to_container(container)
    return {'r': 0, 'msg': ips}
def rm_containers():
    """Batch-remove containers, grouped by (version, host), one async
    TASK_REMOVE task per group.  Images are never deleted here."""
    requested_cids = request.get_json()['cids']
    if not all(len(short_id) >= 7 for short_id in requested_cids):
        raise EruAbortException(consts.HTTP_BAD_REQUEST, 'must given at least 7 chars for container_id')

    grouped = {}
    ts, watch_keys = [], []

    # Group existing containers by (version, host); unknown ids are skipped.
    for short_id in requested_cids:
        container = Container.get_by_container_id(short_id)
        if not container:
            continue
        grouped.setdefault((container.version, container.host), []).append(container)

    for (version, host), containers in grouped.iteritems():
        cids = [c.id for c in containers]
        task_props = {'container_ids': cids}
        task = Task.create(consts.TASK_REMOVE, version, host, task_props)
        remove_containers.apply_async(
            args=(task.id, cids, False),
            task_id='task:%d' % task.id,
        )
        ts.append(task.id)
        watch_keys.append(task.result_key)
    return {'r': 0, 'msg': 'ok', 'tasks': ts, 'watch_keys': watch_keys}
def stop_container(cid):
    """Mark the container record dead and stop the docker container on
    its host.  Unknown ids are a silent no-op."""
    container = Container.get_by_container_id(cid)
    if container:
        container.kill()
        dockerjob.stop_containers([container], container.host)
    return {'r': 0, 'msg': code.OK}
def cure_container(cid):
    """Rebind a dead container's IPs and mark its record healthy again.
    Containers that are already alive (or unknown) are left untouched."""
    container = Container.get_by_container_id(cid)
    if container and not container.is_alive:
        rebind_container_ip(container)
        container.cure()
    return {'r': 0, 'msg': code.OK}
def kill_container(cid):
    """Mark the container record dead.  Unknown ids are a silent no-op."""
    container = Container.get_by_container_id(cid)
    if container:
        container.kill()
    current_app.logger.info('Kill container (container_id=%s)', cid[:7])
    return {'r': 0, 'msg': consts.OK}
def poll_container(cid):
    """Report whether the container is alive; 404 when it is unknown."""
    container = Container.get_by_container_id(cid)
    if not container:
        raise EruAbortException(consts.HTTP_NOT_FOUND, 'Container %s not found' % cid)
    return {
        'r': 0,
        'container': container.container_id,
        'status': container.is_alive,
    }
def get_container_by_cid(cid):
    """Thin lookup wrapper around Container.get_by_container_id."""
    return Container.get_by_container_id(cid)
def remove_container(cid):
    """Remove the docker container from its host; r=1 when it is unknown."""
    container = Container.get_by_container_id(cid)
    if not container:
        return {'r': 1, 'msg': 'container %s not found' % cid}
    dockerjob.remove_container_by_cid([cid], container.host)
    return {'r': 0, 'msg': consts.OK}
def get_container_by_cid(cid):
    """Look up a container by id, aborting with 404 when it is unknown."""
    container = Container.get_by_container_id(cid)
    if not container:
        raise EruAbortException(consts.HTTP_NOT_FOUND, 'Container %s not found' % cid)
    return container
def kill_container(cid):
    """Mark the container record dead.  Unknown ids are a silent no-op."""
    container = Container.get_by_container_id(cid)
    if container:
        container.kill()
    return {'r': 0, 'msg': code.OK}
def cure_container(cid):
    """Mark the container record healthy.  Unknown ids are a silent no-op."""
    container = Container.get_by_container_id(cid)
    if container:
        container.cure()
    return {'r': 0, 'msg': code.OK}