def _get_pod(id_or_name):
    if id_or_name.isdigit():
        pod = Pod.get(int(id_or_name))
    else:
        pod = Pod.get_by_name(id_or_name)
    if not pod:
        abort(404, 'Pod %s not found' % id_or_name)
    return pod

def get_pod(id_or_name):
    if id_or_name.isdigit():
        pod = Pod.get(int(id_or_name))
    else:
        pod = Pod.get_by_name(id_or_name)
    if not pod:
        raise EruAbortException(consts.HTTP_NOT_FOUND, 'Pod %s not found' % id_or_name)
    return pod

def list_pod_hosts(id_or_name):
    if id_or_name.isdigit():
        pod = Pod.get(int(id_or_name))
    else:
        pod = Pod.get_by_name(id_or_name)
    if not pod:
        raise EruAbortException(consts.HTTP_NOT_FOUND, 'Pod %s not found' % id_or_name)
    show_all = request.args.get('all', type=bool, default=False)
    return pod.list_hosts(g.start, g.limit, show_all=show_all)

def test_group_pod(test_db):
    p1 = Pod.create('pod1', 'pod1')
    p2 = Pod.create('pod1', 'pod1')
    assert p1 is not None
    assert p1.name == 'pod1'
    assert p2 is None

    p3 = Pod.get_by_name('pod1')
    assert p3.id == p1.id
    assert p3.get_free_public_hosts(10) == []
    assert get_max_container_count(p3, 1, 2) == 0
    assert centralized_schedule(p3, 1, 1, 2) == {}

def create_local_test_data(private=False):
    appyaml = {
        'appname': 'blueberry',
        'entrypoints': {
            'web': {
                'cmd': 'python app.py',
                'ports': ['5000/tcp'],
            },
            'daemon': {
                'cmd': 'python daemon.py',
            },
            'service': {
                'cmd': 'python service.py'
            },
        },
        'build': 'pip install -r ./requirements.txt',
    }
    app = App.get_or_create('blueberry', 'http://git.hunantv.com/tonic/blueberry.git')
    version = app.add_version('abe23812aeb50a17a2509c02a28423462161d306')
    appconfig = version.appconfig
    appconfig.update(**appyaml)
    appconfig.save()

    pod = Pod.create('pod', 'pod')

    c = docker.Client(**kwargs_from_env(assert_hostname=False))
    r = c.info()
    host = Host.create(pod, '192.168.59.103:2376', r['Name'], r['ID'], r['NCPU'], r['MemTotal'])
    if not private:
        host.set_public()

    return app, version, pod, host

def create_host():
    """Accepts form data instead of JSON because of the certificate file uploads."""
    addr = request.form.get('addr', default='')
    pod_name = request.form.get('pod_name', default='')
    if not (addr and pod_name):
        abort(400, 'Need addr and pod_name')

    pod = Pod.get_by_name(pod_name)
    if not pod:
        abort(400, 'No pod found')

    # Save the certs if they were uploaded; otherwise just skip them.
    if all(k in request.files for k in ['ca', 'cert', 'key']):
        try:
            ca, cert, key = request.files['ca'], request.files['cert'], request.files['key']
            save_docker_certs(addr.split(':', 1)[0], ca.read(), cert.read(), key.read())
        finally:
            ca.close()
            cert.close()
            key.close()

    try:
        client = get_docker_client(addr, force_flush=True)
        info = client.info()
    except Exception as e:
        abort(400, 'Docker daemon error on host %s, error: %s' % (addr, e.message))

    if not Host.create(pod, addr, info['Name'], info['ID'], info['NCPU'], info['MemTotal']):
        abort(400, 'Host create error.')

    return 201, {'r': 0, 'msg': consts.OK}

def _create_data(core_share, max_share_core, host_count):
    group = Group.create('group', 'group')
    pod = Pod.create('pod', 'pod', core_share, max_share_core)
    for _ in range(host_count):
        host = Host.create(pod, random_ipv4(), random_string(), random_uuid(), 16, 4096)
        host.assigned_to_group(group)
    return group, pod

def create_host():
    """Accepts form data instead of JSON because of the certificate file uploads."""
    addr = request.form.get('addr', type=str, default='')
    pod_name = request.form.get('pod_name', type=str, default='')
    if not (addr and pod_name):
        raise EruAbortException(consts.HTTP_BAD_REQUEST, 'need addr and pod_name')

    pod = Pod.get_by_name(pod_name)
    if not pod:
        raise EruAbortException(consts.HTTP_BAD_REQUEST, 'No pod found')

    # Save the certs if they were uploaded; otherwise just skip them.
    try:
        ca, cert, key = request.files['ca'], request.files['cert'], request.files['key']
        save_docker_certs(addr.split(':', 1)[0], ca.read(), cert.read(), key.read())
    finally:
        ca.close()
        cert.close()
        key.close()

    try:
        client = get_docker_client(addr, force_flush=True)
        info = client.info()
    except Exception:
        raise EruAbortException(consts.HTTP_BAD_REQUEST, 'Docker daemon error on host %s' % addr)

    if not Host.create(pod, addr, info['Name'], info['ID'], info['NCPU'], info['MemTotal']):
        raise EruAbortException(consts.HTTP_BAD_REQUEST)

    return consts.HTTP_CREATED, {'r': 0, 'msg': consts.OK}

def create_host():
    """Accepts form data instead of JSON because of the certificate file uploads."""
    addr = request.form.get('addr', default='')
    ip = addr.split(':', 1)[0]
    podname = request.form.get('podname', default='')
    is_public = request.form.get('is_public', default=False, type=bool)
    if not (addr and podname):
        abort(400, 'Bad addr or podname: addr="{}", podname="{}"'.format(addr, podname))

    pod = Pod.get_by_name(podname)
    if not pod:
        abort(400, 'Pod {} not found'.format(podname))

    # Save the certs if they were uploaded; otherwise just skip them.
    certs = ['ca', 'cert', 'key']
    if all(k in request.files for k in certs):
        certs_contents = tuple(request.files[f].read() for f in certs)
        save_docker_certs(ip, *certs_contents)

    try:
        client = get_docker_client(addr, force_flush=True)
        info = client.info()
    except Exception as e:
        abort(400, 'Docker daemon error on host %s, error: %s' % (addr, e.message))

    if not Host.create(pod, addr, info['Name'], info['ID'], info['NCPU'], info['MemTotal'], is_public=is_public):
        abort(400, 'Error while creating host')

    return 201, DEFAULT_RETURN_VALUE

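# Hedged usage sketch for the create_host handler above: it reads multipart form
# data rather than JSON so the TLS cert files can be uploaded alongside the docker
# address. The endpoint URL and host values below are assumptions for illustration
# only; the handler itself only defines the form fields it expects.
import requests  # assumed available in the caller's environment

cert_files = {name: open('certs/%s.pem' % name, 'rb') for name in ('ca', 'cert', 'key')}
resp = requests.post(
    'http://eru-core.example.com/api/sys/host/create',  # hypothetical endpoint URL
    data={'addr': '192.168.59.103:2376', 'podname': 'pod', 'is_public': '1'},
    files=cert_files,
)
for f in cert_files.values():
    f.close()
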
def create_local_test_data(private=False):
    appyaml = {
        'appname': 'blueberry',
        'entrypoints': {
            'web': {
                'cmd': 'python app.py',
                'ports': ['5000/tcp'],
            },
            'daemon': {
                'cmd': 'python daemon.py',
            },
            'service': {
                'cmd': 'python service.py'
            },
        },
        'build': 'pip install -r ./requirements.txt',
    }
    app = App.get_or_create('blueberry', 'http://git.hunantv.com/tonic/blueberry.git', 'token')
    version = app.add_version('abe23812aeb50a17a2509c02a28423462161d306')
    appconfig = version.appconfig
    appconfig.update(**appyaml)
    appconfig.save()

    group = Group.create('group', 'group')
    pod = Pod.create('pod', 'pod')
    pod.assigned_to_group(group)

    c = docker.Client(**kwargs_from_env(assert_hostname=False))
    r = c.info()
    host = Host.create(pod, '192.168.59.103:2376', r['Name'], r['ID'], r['NCPU'], r['MemTotal'])
    if private:
        host.assigned_to_group(group)

    return app, version, group, pod, host

def test_container_release_cores(test_db):
    a = App.get_or_create('app', 'http://git.hunantv.com/group/app.git')
    v = a.add_version(random_sha1())
    p = Pod.create('pod', 'pod', 10, -1)
    host = Host.create(p, random_ipv4(), random_string(), random_uuid(), 200, 0)

    for core in host.cores:
        assert core.host_id == host.id
        assert core.remain == 10

    containers = []
    cores = sorted(host.cores, key=operator.attrgetter('label'))
    for fcores, pcores in zip(chunked(cores[:100], 10), chunked(cores[100:], 10)):
        used_cores = {'full': fcores, 'part': pcores}
        host.occupy_cores(used_cores, 5)
        c = Container.create(random_sha1(), host, v, random_string(), 'entrypoint', used_cores, 'env', nshare=5)
        containers.append(c)

    cores = sorted(host.cores, key=operator.attrgetter('label'))
    for fcores, pcores in zip(chunked(cores[:100], 10), chunked(cores[100:], 10)):
        for core in fcores:
            assert core.remain == 0
        for core in pcores:
            assert core.remain == 5

    for c in containers:
        c.delete()

    cores = sorted(host.cores, key=operator.attrgetter('label'))
    for fcores, pcores in zip(chunked(cores[:100], 10), chunked(cores[100:], 10)):
        for core in fcores:
            assert core.remain == 10
        for core in pcores:
            assert core.remain == 10

def test_pod(test_db):
    p1 = Pod.create('p1', 'p1', core_share=10)
    assert p1 is not None
    assert p1.get_core_allocation(1) == (1, 0)
    assert p1.get_core_allocation(1.5) == (1, 5)
    assert p1.get_core_allocation(2) == (2, 0)
    assert p1.get_core_allocation(2.8) == (2, 8)
    assert p1.get_core_allocation(0.8) == (0, 8)
    assert p1.get_core_allocation(0.1) == (0, 1)

    p2 = Pod.create('p2', 'p2', core_share=100)
    assert p2 is not None
    assert p2.get_core_allocation(1) == (1, 0)
    assert p2.get_core_allocation(1.5) == (1, 50)
    assert p2.get_core_allocation(2) == (2, 0)
    assert p2.get_core_allocation(2.8) == (2, 80)
    assert p2.get_core_allocation(0.81) == (0, 81)
    assert p2.get_core_allocation(0.14) == (0, 14)

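# A minimal sketch (not the project's implementation) of what the assertions in
# test_pod imply about Pod.get_core_allocation: a fractional core requirement is
# split into (full_cores, share_units), with share_units expressed in units of
# 1/core_share of one core. The default core_share here is an assumption for
# illustration only.
def get_core_allocation_sketch(core_require, core_share=10):
    full = int(core_require)
    part = int(round((core_require - full) * core_share))
    return full, part

assert get_core_allocation_sketch(1.5, core_share=10) == (1, 5)
assert get_core_allocation_sketch(2.8, core_share=10) == (2, 8)
assert get_core_allocation_sketch(0.14, core_share=100) == (0, 14)
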
def create_pod():
    data = request.get_json()
    if not Pod.create(
        data['name'],
        data.get('description', ''),
        data.get('core_share', DEFAULT_CORE_SHARE),
        data.get('max_share_core', DEFAULT_MAX_SHARE_CORE),
    ):
        raise EruAbortException(code.HTTP_BAD_REQUEST)
    return {'r': 0, 'msg': code.OK}

def get_pod_resource(pod_id):
    pod = Pod.get(pod_id)
    if not pod:
        raise EruAbortException(code.HTTP_NOT_FOUND, 'Pod %s not found' % pod_id)

    core_count = sum(len(h.cores.all()) for h in pod.hosts.all())
    free_cores = [c for h in pod.hosts.all() for c in h.get_free_cores()]
    return {
        'core_count': core_count,
        'free_cores': [c.label for c in free_cores],
    }

def test_get_max_container_count_single_host(test_db):
    pod = Pod.create('pod', 'pod', 10, -1)
    Host.create(pod, random_ipv4(), random_string(), random_uuid(), 64, 4096)

    assert get_max_container_count(pod, ncore=1, nshare=0) == 64
    assert get_max_container_count(pod, ncore=2, nshare=0) == 32
    assert get_max_container_count(pod, ncore=3, nshare=0) == 21
    assert get_max_container_count(pod, ncore=4, nshare=0) == 16
    assert get_max_container_count(pod, ncore=5, nshare=0) == 12
    assert get_max_container_count(pod, ncore=1, nshare=5) == 42
    assert get_max_container_count(pod, ncore=2, nshare=5) == 25

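# A brute-force sketch (my assumption, not the scheduler's code) of the arithmetic
# behind the expected values above: on one 64-core host with core_share=10, each
# container takes `ncore` exclusive cores plus `nshare` share units carved out of
# cores reserved for sharing, and the count is maximized over how many cores are
# set aside as shared.
def max_containers_sketch(total_cores=64, core_share=10, ncore=1, nshare=0):
    if nshare == 0:
        return total_cores // ncore
    best = 0
    for shared in range(total_cores + 1):
        full = total_cores - shared
        best = max(best, min(full // ncore, shared * core_share // nshare))
    return best

assert max_containers_sketch(ncore=1, nshare=5) == 42
assert max_containers_sketch(ncore=2, nshare=5) == 25
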
def assign_pod_to_group(pod_name):
    data = request.get_json()
    group = Group.get_by_name(data['group_name'])
    pod = Pod.get_by_name(pod_name)
    if not group or not pod:
        raise EruAbortException(code.HTTP_BAD_REQUEST)
    if not pod.assigned_to_group(group):
        raise EruAbortException(code.HTTP_BAD_REQUEST)
    return {'r': 0, 'msg': code.OK}

def create_pod():
    data = request.get_json()
    if not Pod.create(
        data['name'],
        data.get('description', ''),
        data.get('core_share', DEFAULT_CORE_SHARE),
        data.get('max_share_core', DEFAULT_MAX_SHARE_CORE),
    ):
        abort(400, 'Pod create failed')
    _log.info('Pod create succeeded (name=%s, desc=%s)', data['name'], data.get('description', ''))
    return 201, DEFAULT_RETURN_VALUE

def create_pod():
    data = request.get_json()
    if not Pod.create(
        data['name'],
        data.get('description', ''),
        data.get('core_share', DEFAULT_CORE_SHARE),
        data.get('max_share_core', DEFAULT_MAX_SHARE_CORE),
    ):
        abort(400, 'Pod create failed')
    current_app.logger.info('Pod create succeeded (name=%s, desc=%s)', data['name'], data.get('description', ''))
    return 201, {'r': 0, 'msg': consts.OK}

def group_max_containers(group_name):
    pod_name = request.args.get('pod_name', type=str, default='')
    cores_per_container = request.args.get('ncore', type=int, default=1)

    group = Group.get_by_name(group_name)
    if not group:
        raise EruAbortException(code.HTTP_BAD_REQUEST)
    pod = Pod.get_by_name(pod_name)
    if not pod:
        raise EruAbortException(code.HTTP_BAD_REQUEST)

    return {'r': 0, 'msg': code.OK, 'data': group.get_max_containers(pod, cores_per_container)}

def create_pod():
    data = request.get_json()
    if not Pod.create(
        data['name'],
        data.get('description', ''),
        data.get('core_share', DEFAULT_CORE_SHARE),
        data.get('max_share_core', DEFAULT_MAX_SHARE_CORE),
    ):
        raise EruAbortException(consts.HTTP_BAD_REQUEST, 'Pod create failed')
    current_app.logger.info('Pod create succeeded (name=%s, desc=%s)', data['name'], data.get('description', ''))
    return consts.HTTP_CREATED, {'r': 0, 'msg': consts.OK}

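# Hedged usage sketch for the create_pod handlers above: they expect a JSON body
# with a required `name` plus optional `description`, `core_share` and
# `max_share_core`. The endpoint URL is an assumption for illustration only.
import requests  # assumed available in the caller's environment

requests.post(
    'http://eru-core.example.com/api/sys/pod/create',  # hypothetical endpoint URL
    json={'name': 'pod1', 'description': 'test pod', 'core_share': 10, 'max_share_core': -1},
)
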
def test_container_transform(test_db):
    a = App.get_or_create('app', 'http://git.hunantv.com/group/app.git', '')
    assert a is not None

    v = a.add_version(random_sha1())
    v2 = a.add_version(random_sha1())
    assert v is not None
    assert v.app.id == a.id
    assert v.name == a.name
    assert len(v.containers.all()) == 0
    assert len(v.tasks.all()) == 0

    g = Group.create('group', 'group')
    p = Pod.create('pod', 'pod')
    assert p.assigned_to_group(g)
    hosts = [Host.create(p, random_ipv4(), random_string(prefix='host'), random_uuid(), 4, 4096)
             for i in range(6)]

    for host in hosts[:3]:
        host.assigned_to_group(g)

    assert g.get_max_containers(p, 3, 0) == 3
    host_cores = g.get_free_cores(p, 3, 3, 0)
    assert len(host_cores) == 3

    containers = []
    for (host, count), cores in host_cores.iteritems():
        cores_per_container = len(cores) / count
        for i in range(count):
            cid = random_sha1()
            used_cores = {'full': cores['full'][i*cores_per_container:(i+1)*cores_per_container]}
            c = Container.create(cid, host, v, random_string(), 'entrypoint', used_cores, 'env')
            assert c is not None
            containers.append(c)
        host.occupy_cores(cores, 0)

    for host in g.private_hosts.all():
        assert len(host.get_free_cores()[0]) == 1
        assert len(host.containers.all()) == 1
        assert host.count == 1

    assert len(containers) == 3
    assert len(v.containers.all()) == 3

    cids = [c.container_id for c in containers]
    for c in containers:
        host = c.host
        cid = c.container_id
        c.transform(v2, random_sha1(), random_string())
        assert c.container_id != cid

    new_cids = [c.container_id for c in containers]
    assert new_cids != cids

def test_network(test_db):
    n = Network.create('net', '10.1.0.0/16')
    assert n is not None
    assert len(n.ips.all()) == 0
    assert n.hostmask_string == '16'
    assert n.pool_size == 65436
    assert n.used_count == 0
    assert n.used_gate_count == 0
    assert n.gate_pool_size == 100

    ip = n.acquire_ip()
    assert ip is not None
    assert ip.network_id == n.id
    assert ip.vethname == ''
    assert not ip.container_id
    assert ip.hostmask == n.hostmask_string
    assert ip.vlan_seq_id == n.id
    assert ip.address.startswith('10.1')

    assert len(n.ips.all()) == 1
    assert n.pool_size == 65435
    assert n.used_count == 1

    ip.release()

    assert len(n.ips.all()) == 0
    assert n.pool_size == 65436
    assert n.used_count == 0

    p = Pod.create('pod', 'pod', 10, -1)
    host = Host.create(p, random_ipv4(), random_string(prefix='host'), random_uuid(), 4, 4096)

    gate = n.acquire_gateway_ip(host)
    assert gate is not None
    assert gate.network_id == n.id
    assert gate.vlan_address.startswith('10.1.0.')
    assert gate.vlan_seq_id == n.id
    assert gate.name == 'vlan.%02d.br' % n.id

    g = VLanGateway.get_by_host_and_network(host.id, n.id)
    assert g is not None
    assert g.id == gate.id
    assert len(host.list_vlans()) == 1
    assert n.used_gate_count == 1
    assert n.gate_pool_size == 99

    gate.release()
    assert n.used_gate_count == 0
    assert n.gate_pool_size == 100
    assert VLanGateway.get_by_host_and_network(host.id, n.id) is None
    assert len(host.list_vlans()) == 0

def test_get_max_container_count_single_host(test_db):
    group = Group.create('group', 'group')
    pod = Pod.create('pod', 'pod', 10, -1)
    host = Host.create(pod, random_ipv4(), random_string(), random_uuid(), 64, 4096)
    host.assigned_to_group(group)

    assert get_max_container_count(group, pod, ncore=1, nshare=0) == 64
    assert get_max_container_count(group, pod, ncore=2, nshare=0) == 32
    assert get_max_container_count(group, pod, ncore=3, nshare=0) == 21
    assert get_max_container_count(group, pod, ncore=4, nshare=0) == 16
    assert get_max_container_count(group, pod, ncore=5, nshare=0) == 12
    assert get_max_container_count(group, pod, ncore=1, nshare=5) == 42
    assert get_max_container_count(group, pod, ncore=2, nshare=5) == 25

def get_pod_resource(pod_id):
    pod = Pod.get(pod_id)
    if not pod:
        abort(404, 'Pod %s not found' % pod_id)

    core_count = sum(len(h.cores) for h in pod.hosts.all())
    free_excluded_cores = [c for h in pod.hosts.all() for c in h.get_free_cores()[0]]
    free_shared_cores = [c for h in pod.hosts.all() for c in h.get_free_cores()[1]]
    return {
        'core_count': core_count,
        'free_excluded_cores': [c.label for c in free_excluded_cores],
        'free_shared_cores': [c.label for c in free_shared_cores],
    }

def group_max_containers(group_name):
    pod_name = request.args.get('pod_name', default='')
    core_require = request.args.get('ncore', type=float, default=1)

    group = Group.get_by_name(group_name)
    if not group:
        abort(400, 'No group found')
    pod = Pod.get_by_name(pod_name)
    if not pod:
        abort(400, 'No pod found')

    ncore, nshare = pod.get_core_allocation(core_require)
    return {'r': 0, 'msg': consts.OK, 'data': get_max_container_count(group, pod, ncore, nshare)}

def assign_pod_to_group(pod_name):
    data = request.get_json()
    group = Group.get_by_name(data['group_name'])
    pod = Pod.get_by_name(pod_name)
    if not group or not pod:
        abort(404, 'No group/pod found')
    if not pod.assigned_to_group(group):
        abort(400, 'Assign failed')
    current_app.logger.info('Pod (name=%s) assigned to group (name=%s)', pod_name, data['group_name'])
    return {'r': 0, 'msg': consts.OK}

def assign_pod_to_group(pod_name):
    data = request.get_json()
    group = Group.get_by_name(data['group_name'])
    pod = Pod.get_by_name(pod_name)
    if not group or not pod:
        raise EruAbortException(consts.HTTP_BAD_REQUEST, 'No group/pod found')
    if not pod.assigned_to_group(group):
        raise EruAbortException(consts.HTTP_BAD_REQUEST, 'Assign failed')
    current_app.logger.info('Pod (name=%s) assigned to group (name=%s)', pod_name, data['group_name'])
    return {'r': 0, 'msg': consts.OK}

def create_host():
    data = request.get_json()
    addr = data['addr']

    pod = Pod.get_by_name(data['pod_name'])
    if not pod:
        raise EruAbortException(code.HTTP_BAD_REQUEST)

    client = get_docker_client(addr)
    info = client.info()
    if not Host.create(pod, addr, info['Name'], info['ID'], info['NCPU'], info['MemTotal']):
        raise EruAbortException(code.HTTP_BAD_REQUEST)
    return {'r': 0, 'msg': code.OK}

def test_group_pod(test_db):
    g1 = Group.create('group1', 'group1')
    g2 = Group.create('group1', 'group1')
    assert g1 is not None
    assert g1.name == 'group1'
    assert g2 is None

    p1 = Pod.create('pod1', 'pod1')
    p2 = Pod.create('pod1', 'pod1')
    assert p1 is not None
    assert p1.name == 'pod1'
    assert p2 is None

    g3 = Group.get_by_name('group1')
    assert g3.id == g1.id
    p3 = Pod.get_by_name('pod1')
    assert p3.id == p1.id

    assert p3.assigned_to_group(g3)
    assert p3.get_free_public_hosts(10) == []
    assert g3.get_max_containers(p3, 1, 2) == 0
    assert g3.get_free_cores(p3, 1, 1, 2) == {}

def test_occupy_and_release_cores(test_db):
    g = Group.create('group', 'group')
    p = Pod.create('pod', 'pod', 10, -1)
    host = Host.create(p, random_ipv4(), random_string(), random_uuid(), 200, 0)
    assert p.assigned_to_group(g)
    assert host.assigned_to_group(g)

    for core in host.cores:
        assert core.host_id == host.id
        assert core.remain == 10

    cores = {
        'full': host.cores[:100],
        'part': host.cores[100:],
    }

    host.occupy_cores(cores, 5)
    for core in host.cores[:100]:
        assert core.remain == 0
    for core in host.cores[100:]:
        assert core.remain == 5

    host.release_cores(cores, 5)
    for core in host.cores:
        assert core.remain == 10

    host.occupy_cores(cores, 8)
    for core in host.cores[:100]:
        assert core.remain == 0
    for core in host.cores[100:]:
        assert core.remain == 2

    host.release_cores(cores, 8)
    for core in host.cores:
        assert core.remain == 10

    cores = {
        'full': host.cores[:50],
        'part': host.cores[50:100],
    }

    host.occupy_cores(cores, 8)
    for core in host.cores[:50]:
        assert core.remain == 0
    for core in host.cores[50:100]:
        assert core.remain == 2

    host.release_cores(cores, 8)
    for core in host.cores:
        assert core.remain == 10

def test_host(test_db):
    g = Group.create('group', 'group')
    p = Pod.create('pod', 'pod')
    assert p.assigned_to_group(g)
    hosts = [Host.create(p, random_ipv4(), random_string(prefix='host'), random_uuid(), 4, 4096)
             for i in range(6)]

    for host in hosts:
        assert host is not None
        assert len(host.cores.all()) == 4
        assert len(host.get_free_cores()) == 4

    assert len(g.private_hosts.all()) == 0
    assert g.get_max_containers(p, 1) == 0
    assert g.get_free_cores(p, 1, 1) == {}

    for host in hosts[:3]:
        host.assigned_to_group(g)

    host_ids1 = {h.id for h in hosts[:3]}
    host_ids2 = {h.id for h in hosts[3:]}

    assert len(g.private_hosts.all()) == 3
    assert g.get_max_containers(p, 1) == 12

    host_cores = g.get_free_cores(p, 12, 1)
    assert len(host_cores) == 3
    for (host, count), cores in host_cores.iteritems():
        assert host.id in host_ids1
        assert host.id not in host_ids2
        assert count == 4
        assert len(cores) == 4

    assert g.get_max_containers(p, 3) == 3
    host_cores = g.get_free_cores(p, 3, 3)
    assert len(host_cores) == 3
    for (host, count), cores in host_cores.iteritems():
        assert host.id in host_ids1
        assert host.id not in host_ids2
        assert count == 1
        assert len(cores) == 3

    assert g.get_max_containers(p, 2) == 6
    host_cores = g.get_free_cores(p, 4, 2)
    assert len(host_cores) == 2
    for (host, count), cores in host_cores.iteritems():
        assert host.id in host_ids1
        assert host.id not in host_ids2
        assert count == 2
        assert len(cores) == 4

def test_occupy_and_release_cores(test_db):
    p = Pod.create('pod', 'pod', 10, -1)
    host = Host.create(p, random_ipv4(), random_string(), random_uuid(), 200, 0)

    for core in host.cores:
        assert core.host_id == host.id
        assert core.remain == 10

    cores = {
        'full': host.cores[:100],
        'part': host.cores[100:],
    }

    host.occupy_cores(cores, 5)
    for core in host.cores[:100]:
        assert core.remain == 0
    for core in host.cores[100:]:
        assert core.remain == 5

    host.release_cores(cores, 5)
    for core in host.cores:
        assert core.remain == 10

    host.occupy_cores(cores, 8)
    for core in host.cores[:100]:
        assert core.remain == 0
    for core in host.cores[100:]:
        assert core.remain == 2

    host.release_cores(cores, 8)
    for core in host.cores:
        assert core.remain == 10

    cores = {
        'full': host.cores[:50],
        'part': host.cores[50:100],
    }

    host.occupy_cores(cores, 8)
    for core in host.cores[:50]:
        assert core.remain == 0
    for core in host.cores[50:100]:
        assert core.remain == 2

    host.release_cores(cores, 8)
    for core in host.cores:
        assert core.remain == 10

def create_test_suite():
    appyaml = {
        'appname': 'app',
        'entrypoints': {
            'web': {
                'cmd': 'python app.py',
                'ports': ['5000/tcp'],
            },
            'daemon': {
                'cmd': 'python daemon.py',
            },
            'service': {
                'cmd': 'python service.py'
            },
        },
        'build': 'pip install -r ./requirements.txt',
    }
    app = App.get_or_create('app', 'http://git.hunantv.com/group/app.git')
    version = app.add_version(random_sha1())
    appconfig = version.appconfig
    appconfig.update(**appyaml)
    appconfig.save()

    pod = Pod.create('pod', 'pod')
    hosts = [
        Host.create(pod, random_ipv4(), random_string(prefix='host'), random_uuid(), 4, 4096)
        for i in range(4)
    ]

    containers = []
    for (host, count), cores in centralized_schedule(pod, 4, 4, 0).iteritems():
        cores_per_container = len(cores) / count
        for i in range(count):
            cid = random_sha1()
            used_cores = {
                'full': cores['full'][i * cores_per_container:(i + 1) * cores_per_container]
            }
            c = Container.create(cid, host, version, random_string(), 'web', used_cores, 'env')
            containers.append(c)
        host.occupy_cores(cores, 0)

    return app, version, pod, hosts, containers

def _get_instances(podname, appname, version, **kwargs):
    pod = Pod.get_by_name(podname)
    if not pod:
        abort(400, 'Pod `%s` not found' % podname)

    app, version = _get_app_and_version(appname, version)
    return pod, app, version

def test_container(test_db):
    a = App.get_or_create('app', 'http://git.hunantv.com/group/app.git')
    assert a is not None
    assert a.id == a.user_id

    v = a.add_version(random_sha1())
    assert v is not None
    assert v.app.id == a.id
    assert v.name == a.name
    assert len(v.containers.all()) == 0
    assert len(v.tasks.all()) == 0

    p = Pod.create('pod', 'pod', 10, -1)
    hosts = [
        Host.create(p, random_ipv4(), random_string(prefix='host'), random_uuid(), 4, 4096)
        for i in range(6)
    ]

    for host in hosts[3:]:
        host.set_public()

    host_ids1 = {h.id for h in hosts[:3]}
    host_ids2 = {h.id for h in hosts[3:]}
    host_cores = centralized_schedule(p, 3, 3, 0)

    # Case without fragmented (shared) cores.
    # Acquire cores.
    containers = []
    for (host, count), cores in host_cores.iteritems():
        host.occupy_cores(cores, 0)
        cores_per_container = len(cores['full']) / count
        for i in range(count):
            cid = random_sha1()
            used_cores = {
                'full': cores['full'][i * cores_per_container:(i + 1) * cores_per_container]
            }
            c = Container.create(cid, host, v, random_string(), 'entrypoint', used_cores, 'env', nshare=0)
            assert c is not None
            containers.append(c)

    for host in p.get_private_hosts():
        full_cores, part_cores = host.get_free_cores()
        assert len(full_cores) == 1
        assert len(part_cores) == 0
        assert len(host.containers.all()) == 1
        assert host.count == 1

    assert len(containers) == 3
    assert len(v.containers.all()) == 3

    for c in containers:
        assert c.host_id in host_ids1
        assert c.host_id not in host_ids2
        assert c.app.id == a.id
        assert c.version.id == v.id
        assert c.is_alive
        assert len(c.full_cores) == 3
        assert len(c.part_cores) == 0
        all_core_labels = sorted(['0', '1', '2', '3'])
        used_full_core_labels = [core.label for core in c.full_cores]
        used_part_core_labels = [core.label for core in c.part_cores]
        free_core_labels = [core.label for core in c.host.get_free_cores()[0]]
        assert all_core_labels == sorted(used_full_core_labels + used_part_core_labels + free_core_labels)

    # Release cores.
    for c in containers:
        c.delete()

    assert len(v.containers.all()) == 0
    assert get_max_container_count(p, 3, 0) == 3
    host_cores = centralized_schedule(p, 3, 3, 0)
    assert len(host_cores) == 3

    for host in p.get_private_hosts():
        full_cores, part_cores = host.get_free_cores()
        assert len(full_cores) == 4
        assert len(part_cores) == 0
        assert len(host.containers.all()) == 0
        assert host.count == 4

    # Case with fragmented (shared) cores.
    # Acquire cores.
    host_cores = centralized_schedule(p, 3, 3, 4)
    containers = []
    for (host, count), cores in host_cores.iteritems():
        cores_per_container = len(cores['full']) / count
        host.occupy_cores(cores, 4)
        for i in range(count):
            cid = random_sha1()
            used_cores = {
                'full': cores['full'][i * cores_per_container:(i + 1) * cores_per_container],
                'part': cores['part'],
            }
            # not using a port
            c = Container.create(cid, host, v, random_string(), 'entrypoint', used_cores, 'env', nshare=4)
            assert c is not None
            containers.append(c)

    for host in p.get_private_hosts():
        full_cores, part_cores = host.get_free_cores()
        assert len(full_cores) == 0
        assert len(part_cores) == 1
        assert part_cores[0].remain == 6
        assert len(host.containers.all()) == 1
        assert host.count == D('0.6')

    assert len(containers) == 3
    assert len(v.containers.all()) == 3

    for c in containers:
        assert c.host_id in host_ids1
        assert c.host_id not in host_ids2
        assert c.app.id == a.id
        assert c.version.id == v.id
        assert c.is_alive
        assert len(c.full_cores) == 3
        assert len(c.part_cores) == 1
        all_core_labels = sorted(['0', '1', '2', '3'])
        used_full_core_labels = [core.label for core in c.full_cores]
        used_part_core_labels = [core.label for core in c.part_cores]
        free_core_labels = [core.label for core in c.host.get_free_cores()[0]]
        assert all_core_labels == sorted(used_full_core_labels + used_part_core_labels + free_core_labels)

    # Release cores.
    for c in containers:
        c.delete()

    assert len(v.containers.all()) == 0
    assert get_max_container_count(p, 3, 0) == 3
    host_cores = centralized_schedule(p, 3, 3, 0)
    assert len(host_cores) == 3

    for host in p.get_private_hosts():
        full_cores, part_cores = host.get_free_cores()
        assert len(full_cores) == 4
        assert len(host.containers.all()) == 0
        assert host.count == 4

    # Acquire cores again: one full core plus five share units per container.
    host_cores = centralized_schedule(p, 6, 1, 5)
    containers = []
    for (host, count), cores in host_cores.iteritems():
        cores_per_container = len(cores['full']) / count
        for i in range(count):
            cid = random_sha1()
            used_cores = {
                'full': cores['full'][i * cores_per_container:(i + 1) * cores_per_container],
                'part': cores['part'][i:i + 1],
            }
            host.occupy_cores(used_cores, 5)
            # not using a port
            c = Container.create(cid, host, v, random_string(), 'entrypoint', used_cores, 'env', nshare=5)
            assert c is not None
            containers.append(c)

    for host in p.get_private_hosts():
        full_cores, part_cores = host.get_free_cores()
        assert len(full_cores) == 1
        assert len(part_cores) == 0
        assert len(host.containers.all()) == 2
        assert host.count == D('1')

    assert len(containers) == 6
    assert len(v.containers.all()) == 6

    for c in containers:
        assert c.host_id in host_ids1
        assert c.host_id not in host_ids2
        assert c.app.id == a.id
        assert c.version.id == v.id
        assert c.is_alive
        assert len(c.full_cores) == 1
        assert len(c.part_cores) == 1

    # Release cores.
    for c in containers:
        c.delete()

    assert len(v.containers.all()) == 0
    assert get_max_container_count(p, 3, 0) == 3
    host_cores = centralized_schedule(p, 3, 3, 0)
    assert len(host_cores) == 3

    for host in p.get_private_hosts():
        full_cores, part_cores = host.get_free_cores()
        assert len(full_cores) == 4
        assert len(part_cores) == 0
        assert len(host.containers.all()) == 0
        assert host.count == 4

def _create_data(core_share, max_share_core, host_count):
    pod = Pod.create('pod', 'pod', core_share, max_share_core)
    for _ in range(host_count):
        Host.create(pod, random_ipv4(), random_string(), random_uuid(), 16, 4096)
    return pod

def test_host(test_db):
    p = Pod.create('pod', 'pod', 10, -1)
    hosts = [
        Host.create(p, random_ipv4(), random_string(prefix='host'), random_uuid(), 4, 4096)
        for i in range(6)
    ]

    for host in hosts:
        host.set_public()
        assert host is not None
        assert len(host.cores) == 4
        full_cores, part_cores = host.get_free_cores()
        assert len(full_cores) == 4
        assert len(part_cores) == 0

    assert len(p.get_private_hosts()) == 0
    assert get_max_container_count(p, 1, 0) == 0
    assert centralized_schedule(p, 1, 1, 0) == {}

    for host in hosts[:3]:
        host.set_private()

    host_ids1 = {h.id for h in hosts[:3]}
    host_ids2 = {h.id for h in hosts[3:]}

    assert get_max_container_count(p, 1, 0) == 12
    host_cores = centralized_schedule(p, 12, 1, 0)
    assert len(host_cores) == 3
    for (host, count), cores in host_cores.iteritems():
        assert host.id in host_ids1
        assert host.id not in host_ids2
        assert count == 4
        assert len(cores['full']) == 4

    assert get_max_container_count(p, 3, 0) == 3
    host_cores = centralized_schedule(p, 3, 3, 0)
    assert len(host_cores) == 3
    for (host, count), cores in host_cores.iteritems():
        assert host.id in host_ids1
        assert host.id not in host_ids2
        assert count == 1
        assert len(cores['full']) == 3

    assert get_max_container_count(p, 2, 0) == 6
    host_cores = centralized_schedule(p, 4, 2, 0)
    assert len(host_cores) == 2
    for (host, count), cores in host_cores.iteritems():
        assert host.id in host_ids1
        assert host.id not in host_ids2
        assert count == 2
        assert len(cores['full']) == 4

    assert get_max_container_count(p, 1, 1) == 9
    host_cores = centralized_schedule(p, 3, 1, 1)
    assert len(host_cores) == 1
    for (host, count), cores in host_cores.iteritems():
        assert host.id in host_ids1
        assert host.id not in host_ids2
        assert count == 3
        assert len(cores['full']) == 3
        assert len(cores['part']) == 3

    assert get_max_container_count(p, 2, 3) == 3
    host_cores = centralized_schedule(p, 3, 2, 3)
    assert len(host_cores) == 3
    for (host, count), cores in host_cores.iteritems():
        assert host.id in host_ids1
        assert host.id not in host_ids2
        assert count == 1
        assert len(cores['full']) == 2
        assert len(cores['part']) == 1