def test_group_pod(test_db):
    """Pod creation, name uniqueness, lookup, and empty-pod scheduling checks."""
    pod = Pod.create('pod1', 'pod1')
    duplicate = Pod.create('pod1', 'pod1')
    assert pod is not None
    assert pod.name == 'pod1'
    # Creating a pod with an existing name must fail and return None.
    assert duplicate is None

    fetched = Pod.get_by_name('pod1')
    assert fetched.id == pod.id

    # A freshly created pod has no hosts, so nothing can be scheduled on it.
    assert fetched.get_free_public_hosts(10) == []
    assert get_max_container_count(fetched, 1, 2) == 0
    assert centralized_schedule(fetched, 1, 1, 2) == {}
def create_test_suite():
    """Build a full test fixture: an app with one version, a pod with four
    4-core hosts, and containers scheduled onto them.

    Returns:
        (app, version, pod, hosts, containers) tuple for use by tests.
    """
    appyaml = {
        'appname': 'app',
        'entrypoints': {
            'web': {
                'cmd': 'python app.py',
                'ports': ['5000/tcp'],
            },
            'daemon': {
                'cmd': 'python daemon.py',
            },
            'service': {
                'cmd': 'python service.py'
            },
        },
        'build': 'pip install -r ./requirements.txt',
    }
    app = App.get_or_create('app', 'http://git.hunantv.com/group/app.git')
    version = app.add_version(random_sha1())
    appconfig = version.appconfig
    appconfig.update(**appyaml)
    appconfig.save()

    pod = Pod.create('pod', 'pod')
    hosts = [
        Host.create(pod, random_ipv4(), random_string(prefix='host'),
                    random_uuid(), 4, 4096)
        for _ in range(4)
    ]

    containers = []
    for (host, count), cores in centralized_schedule(pod, 4, 4, 0).iteritems():
        # BUGFIX: was `len(cores) / count`, which counted the dict keys
        # ('full'/'part' -> always 2) instead of the allocated full cores,
        # so containers recorded the wrong core slices. Sibling tests
        # (test_container) use len(cores['full']) / count.
        cores_per_container = len(cores['full']) / count
        for i in range(count):
            cid = random_sha1()
            used_cores = {
                'full': cores['full'][i * cores_per_container:(i + 1) * cores_per_container]
            }
            c = Container.create(cid, host, version, random_string(), 'web', used_cores, 'env')
            containers.append(c)
        # Mark the scheduled cores as used on the host (nshare=0: no fragments).
        host.occupy_cores(cores, 0)
    return app, version, pod, hosts, containers
def create_test_suite():
    """Build a full test fixture (group-aware variant): an app with one
    version, a group, a pod assigned to that group, four 4-core hosts in the
    group, and containers scheduled onto them.

    Returns:
        (app, version, group, pod, hosts, containers) tuple for tests.
    """
    appyaml = {
        'appname': 'app',
        'entrypoints': {
            'web': {
                'cmd': 'python app.py',
                'ports': ['5000/tcp'],
            },
            'daemon': {
                'cmd': 'python daemon.py',
            },
            'service': {
                'cmd': 'python service.py'
            },
        },
        'build': 'pip install -r ./requirements.txt',
    }
    app = App.get_or_create('app', 'http://git.hunantv.com/group/app.git', 'token')
    version = app.add_version(random_sha1())
    appconfig = version.appconfig
    appconfig.update(**appyaml)
    appconfig.save()

    group = Group.create('group', 'group')
    pod = Pod.create('pod', 'pod')
    pod.assigned_to_group(group)
    hosts = [Host.create(pod, random_ipv4(), random_string(prefix='host'),
                         random_uuid(), 4, 4096) for _ in range(4)]
    for host in hosts:
        host.assigned_to_group(group)

    containers = []
    for (host, count), cores in centralized_schedule(group, pod, 4, 4, 0).iteritems():
        # BUGFIX: was `len(cores) / count`, which counted the dict keys
        # ('full'/'part' -> always 2) instead of the allocated full cores,
        # so containers recorded the wrong core slices. Sibling tests
        # (test_container) use len(cores['full']) / count.
        cores_per_container = len(cores['full']) / count
        for i in range(count):
            cid = random_sha1()
            used_cores = {'full': cores['full'][i*cores_per_container:(i+1)*cores_per_container]}
            c = Container.create(cid, host, version, random_string(), 'web', used_cores, 'env')
            containers.append(c)
        # Mark the scheduled cores as used on the host (nshare=0: no fragments).
        host.occupy_cores(cores, 0)
    return app, version, group, pod, hosts, containers
def test_centralized_schedule(test_db):
    """Centralized (bin-packing) scheduling over a small pod.

    Verifies that demand exceeding capacity yields an empty plan, and that
    container counts and full/part core allocations per host are as expected.
    """
    # 4 hosts with 16 cores each; no limit on core sharing.
    pod = _create_data(10, -1, 4)

    # Demands beyond total capacity must produce an empty schedule.
    assert len(centralized_schedule(pod, ncontainer=100, ncore=2)) == 0
    assert len(centralized_schedule(pod, ncontainer=130, ncore=0, nshare=5)) == 0

    # 10 containers x 1 core fit on a single host.
    r = centralized_schedule(pod, ncontainer=10, ncore=1)
    assert len(r) == 1
    assert sum(i[1] for i in r.keys()) == 10
    for (host, count), cores in r.iteritems():
        assert count == 10
        assert len(cores['full']) == 10
        assert len(cores['part']) == 0

    # 9 containers x 2 cores: one host is filled (8 containers), one gets 1.
    r = centralized_schedule(pod, ncontainer=9, ncore=2)
    assert len(r) == 2
    assert sum(i[1] for i in r.keys()) == 9
    for (host, count), cores in r.iteritems():
        assert count in (1, 8)
        if count == 1:
            assert len(cores['full']) == 2
            assert len(cores['part']) == 0
        if count == 8:
            assert len(cores['full']) == 16
            assert len(cores['part']) == 0

    # With nshare=5, each container also gets a 5/10 slice of a shared core.
    r = centralized_schedule(pod, ncontainer=30, ncore=1, nshare=5)
    assert len(r) == 3
    assert sum(i[1] for i in r.keys()) == 30
    for (host, count), cores in r.iteritems():
        assert count == 10
        assert len(cores['full']) == 10
        assert len(cores['part']) == 10
        # 10 part entries but only 5 distinct shared cores (2 containers each).
        assert len(set(cores['part'])) == 5

    # Mixed full + fragment demand spread over 4 hosts.
    r = centralized_schedule(pod, ncontainer=20, ncore=2, nshare=3)
    assert len(r) == 4
    assert sum(i[1] for i in r.keys()) == 20
    for (host, count), cores in r.iteritems():
        assert count in (2, 6)
        if count == 2:
            assert len(cores['full']) == 4
            assert len(cores['part']) == 2
            assert len(set(cores['part'])) == 1
        if count == 6:
            assert len(cores['full']) == 12
            assert len(cores['part']) == 6
            assert len(set(cores['part'])) == 2
def test_centralized_schedule(test_db):
    """Centralized (bin-packing) scheduling over a small pod (group-aware
    variant): same expectations as the pod-only test but scoped to a group.
    """
    # 4 hosts with 16 cores each; no limit on core sharing.
    group, pod = _create_data(10, -1, 4)

    # Demands beyond total capacity must produce an empty schedule.
    assert len(centralized_schedule(group, pod, ncontainer=100, ncore=2)) == 0
    assert len(centralized_schedule(group, pod, ncontainer=130, ncore=0, nshare=5)) == 0

    # 10 containers x 1 core fit on a single host.
    r = centralized_schedule(group, pod, ncontainer=10, ncore=1)
    assert len(r) == 1
    assert sum(i[1] for i in r.keys()) == 10
    for (host, count), cores in r.iteritems():
        assert count == 10
        assert len(cores['full']) == 10
        assert len(cores['part']) == 0

    # 9 containers x 2 cores: one host is filled (8 containers), one gets 1.
    r = centralized_schedule(group, pod, ncontainer=9, ncore=2)
    assert len(r) == 2
    assert sum(i[1] for i in r.keys()) == 9
    for (host, count), cores in r.iteritems():
        assert count in (1, 8)
        if count == 1:
            assert len(cores['full']) == 2
            assert len(cores['part']) == 0
        if count == 8:
            assert len(cores['full']) == 16
            assert len(cores['part']) == 0

    # With nshare=5, each container also gets a 5/10 slice of a shared core.
    r = centralized_schedule(group, pod, ncontainer=30, ncore=1, nshare=5)
    assert len(r) == 3
    assert sum(i[1] for i in r.keys()) == 30
    for (host, count), cores in r.iteritems():
        assert count == 10
        assert len(cores['full']) == 10
        assert len(cores['part']) == 10
        # 10 part entries but only 5 distinct shared cores (2 containers each).
        assert len(set(cores['part'])) == 5

    # Mixed full + fragment demand spread over 4 hosts.
    r = centralized_schedule(group, pod, ncontainer=20, ncore=2, nshare=3)
    assert len(r) == 4
    assert sum(i[1] for i in r.keys()) == 20
    for (host, count), cores in r.iteritems():
        assert count in (2, 6)
        if count == 2:
            assert len(cores['full']) == 4
            assert len(cores['part']) == 2
            assert len(set(cores['part'])) == 1
        if count == 6:
            assert len(cores['full']) == 12
            assert len(cores['part']) == 6
            assert len(set(cores['part'])) == 2
def test_group_pod(test_db):
    """Group/Pod creation, name uniqueness, lookup, assignment, and
    empty-group scheduling checks."""
    group = Group.create('group1', 'group1')
    dup_group = Group.create('group1', 'group1')
    assert group is not None
    assert group.name == 'group1'
    # Creating a group with an existing name must fail and return None.
    assert dup_group is None

    pod = Pod.create('pod1', 'pod1')
    dup_pod = Pod.create('pod1', 'pod1')
    assert pod is not None
    assert pod.name == 'pod1'
    # Same uniqueness rule for pods.
    assert dup_pod is None

    fetched_group = Group.get_by_name('group1')
    assert fetched_group.id == group.id
    fetched_pod = Pod.get_by_name('pod1')
    assert fetched_pod.id == pod.id

    # The pod can be assigned to the group; with no hosts nothing schedules.
    assert fetched_pod.assigned_to_group(fetched_group)
    assert fetched_pod.get_free_public_hosts(10) == []
    assert get_max_container_count(fetched_group, fetched_pod, 1, 2) == 0
    assert centralized_schedule(fetched_group, fetched_pod, 1, 1, 2) == {}
def create_private(group_name, pod_name, appname):
    """Deploy containers of an app version onto private hosts of a pod/group.

    ncore: the number of cores required; may be fractional, e.g. 1.5.

    Reads the JSON request body for version, core demand, container count,
    networks, strategy, and optional host pinning; schedules cores under a
    redis lock and creates one deploy task per (host, count) allocation.

    Returns a dict with task ids and redis watch keys.
    Raises EruAbortException on validation or capacity errors.
    """
    data = request.get_json()
    vstr = data['version']
    group, pod, application, version = validate_instance(group_name,
            pod_name, appname, vstr)

    # TODO check if group has this pod
    # Split the (possibly fractional) per-container core demand into whole
    # cores (ncore) and a fragment share (nshare) in units of pod.core_share.
    core_require = int(float(data['ncore']) * pod.core_share)  # how many cores one container needs
    ncore = core_require / pod.core_share
    nshare = core_require % pod.core_share

    ncontainer = int(data['ncontainer'])
    networks = Network.get_multi(data.get('networks', []))
    spec_ips = data.get('spec_ips', [])
    appconfig = version.appconfig
    strategy = data.get('strategy', 'average')

    # Optional pinned host; if absent the scheduler picks hosts itself.
    hostname = data.get('hostname', '')
    host = hostname and Host.get_by_name(hostname) or None
    if host and not (host.group_id == group.id and host.pod_id == pod.id):
        current_app.logger.error('Host must belong to pod/group (hostname=%s, pod=%s, group=%s)',
                host, pod_name, group_name)
        raise EruAbortException(consts.HTTP_BAD_REQUEST,
                'Host must belong to this pod and group')

    if not data['entrypoint'] in appconfig.entrypoints:
        current_app.logger.error('Entrypoint not in app.yaml (entry=%s, name=%s, version=%s)',
                data['entrypoint'], appname, version.short_sha)
        raise EruAbortException(consts.HTTP_BAD_REQUEST,
                'Entrypoint %s not in app.yaml' % data['entrypoint'])

    ts, keys = [], []
    # Lock per (group, pod) so concurrent deploys don't double-book cores.
    with rds.lock('%s:%s' % (group_name, pod_name)):
        if strategy == 'average':
            host_cores = average_schedule(group, pod, ncontainer, ncore, nshare, spec_host=host)
        elif strategy == 'centralized':
            host_cores = centralized_schedule(group, pod, ncontainer, ncore, nshare, spec_host=host)
        else:
            raise EruAbortException(consts.HTTP_BAD_REQUEST, 'strategy %s not supported' % strategy)

        if not host_cores:
            current_app.logger.error('Not enough cores (name=%s, version=%s, ncore=%s)',
                    appname, version.short_sha, data['ncore'])
            raise EruAbortException(consts.HTTP_BAD_REQUEST,
                    'Not enough core resources')

        for (host, container_count), cores in host_cores.iteritems():
            t = _create_task(
                version,
                host,
                container_count,
                cores,
                nshare,
                networks,
                spec_ips,
                data['entrypoint'],
                data['env'],
                image=data.get('image', ''),
            )
            if not t:
                # Task creation failed for this host; skip without occupying cores.
                continue

            # Reserve the cores only after the task exists.
            host.occupy_cores(cores, nshare)
            ts.append(t.id)
            keys.append(t.result_key)

    return {'r': 0, 'msg': 'ok', 'tasks': ts, 'watch_keys': keys}
def test_host(test_db):
    """Host public/private state and scheduling over private hosts only.

    Builds a pod of six 4-core hosts, marks them public (unschedulable for
    private deploys), then flips three to private and checks capacity and
    centralized schedules against those three hosts.
    """
    p = Pod.create('pod', 'pod', 10, -1)
    hosts = [
        Host.create(p, random_ipv4(), random_string(prefix='host'),
                    random_uuid(), 4, 4096)
        for i in range(6)
    ]
    for host in hosts:
        host.set_public()
        assert host is not None
        assert len(host.cores) == 4
        full_cores, part_cores = host.get_free_cores()
        assert len(full_cores) == 4
        assert len(part_cores) == 0

    # All hosts are public: no private capacity, nothing schedules.
    assert len(p.get_private_hosts()) == 0
    assert get_max_container_count(p, 1, 0) == 0
    assert centralized_schedule(p, 1, 1, 0) == {}

    for host in hosts[:3]:
        host.set_private()
    host_ids1 = {h.id for h in hosts[:3]}
    host_ids2 = {h.id for h in hosts[3:]}

    # 3 private hosts x 4 cores = 12 single-core containers.
    assert get_max_container_count(p, 1, 0) == 12
    host_cores = centralized_schedule(p, 12, 1, 0)
    assert len(host_cores) == 3
    for (host, count), cores in host_cores.iteritems():
        assert host.id in host_ids1
        assert host.id not in host_ids2
        assert count == 4
        assert len(cores['full']) == 4

    # 3-core containers: one per host.
    assert get_max_container_count(p, 3, 0) == 3
    host_cores = centralized_schedule(p, 3, 3, 0)
    assert len(host_cores) == 3
    for (host, count), cores in host_cores.iteritems():
        assert host.id in host_ids1
        assert host.id not in host_ids2
        assert count == 1
        assert len(cores['full']) == 3

    # 2-core containers: centralized packing fills 2 hosts for 4 containers.
    assert get_max_container_count(p, 2, 0) == 6
    host_cores = centralized_schedule(p, 4, 2, 0)
    assert len(host_cores) == 2
    for (host, count), cores in host_cores.iteritems():
        assert host.id in host_ids1
        assert host.id not in host_ids2
        assert count == 2
        assert len(cores['full']) == 4

    # 1 full core + 1/10 fragment per container: 3 fit on one host.
    assert get_max_container_count(p, 1, 1) == 9
    host_cores = centralized_schedule(p, 3, 1, 1)
    assert len(host_cores) == 1
    for (host, count), cores in host_cores.iteritems():
        assert host.id in host_ids1
        assert host.id not in host_ids2
        assert count == 3
        assert len(cores['full']) == 3
        assert len(cores['part']) == 3

    # 2 full cores + 3/10 fragment per container: one per host.
    assert get_max_container_count(p, 2, 3) == 3
    host_cores = centralized_schedule(p, 3, 2, 3)
    assert len(host_cores) == 3
    for (host, count), cores in host_cores.iteritems():
        assert host.id in host_ids1
        assert host.id not in host_ids2
        assert count == 1
        assert len(cores['full']) == 2
        assert len(cores['part']) == 1
def test_container(test_db):
    """Container lifecycle: schedule cores, create containers, verify host
    accounting (full and fragment cores), then delete and verify release.

    Covers three scenarios: whole cores only, whole + shared fragment cores
    (nshare=4), and single core + per-container fragment slice (nshare=5).
    """
    a = App.get_or_create('app', 'http://git.hunantv.com/group/app.git')
    assert a is not None
    assert a.id == a.user_id
    v = a.add_version(random_sha1())
    assert v is not None
    assert v.app.id == a.id
    assert v.name == a.name
    assert len(v.containers.all()) == 0
    assert len(v.tasks.all()) == 0

    p = Pod.create('pod', 'pod', 10, -1)
    hosts = [
        Host.create(p, random_ipv4(), random_string(prefix='host'),
                    random_uuid(), 4, 4096)
        for i in range(6)
    ]
    # Last three hosts become public; only the first three are schedulable.
    for host in hosts[3:]:
        host.set_public()
    host_ids1 = {h.id for h in hosts[:3]}
    host_ids2 = {h.id for h in hosts[3:]}

    host_cores = centralized_schedule(p, 3, 3, 0)

    # Case 1: no fragment cores.
    # Acquire cores.
    containers = []
    for (host, count), cores in host_cores.iteritems():
        host.occupy_cores(cores, 0)
        cores_per_container = len(cores['full']) / count
        for i in range(count):
            cid = random_sha1()
            used_cores = {
                'full': cores['full'][i * cores_per_container:(i + 1) * cores_per_container]
            }
            c = Container.create(cid, host, v, random_string(), 'entrypoint', used_cores, 'env', nshare=0)
            assert c is not None
            containers.append(c)

    # Each private host now runs one 3-core container, leaving 1 core free.
    for host in p.get_private_hosts():
        full_cores, part_cores = host.get_free_cores()
        assert len(full_cores) == 1
        assert len(part_cores) == 0
        assert len(host.containers.all()) == 1
        assert host.count == 1

    assert len(containers) == 3
    assert len(v.containers.all()) == 3

    for c in containers:
        assert c.host_id in host_ids1
        assert c.host_id not in host_ids2
        assert c.app.id == a.id
        assert c.version.id == v.id
        assert c.is_alive
        assert len(c.full_cores) == 3
        assert len(c.part_cores) == 0
        # Used + free core labels must account for every core on the host.
        all_core_labels = sorted([
            '0', '1', '2', '3',
        ])
        used_full_core_labels = [core.label for core in c.full_cores]
        used_part_core_labels = [core.label for core in c.part_cores]
        free_core_labels = [core.label for core in c.host.get_free_cores()[0]]
        assert all_core_labels == sorted(used_full_core_labels + used_part_core_labels + free_core_labels)

    # Release cores.
    for c in containers:
        c.delete()
    assert len(v.containers.all()) == 0
    assert get_max_container_count(p, 3, 0) == 3
    host_cores = centralized_schedule(p, 3, 3, 0)
    assert len(host_cores) == 3
    for host in p.get_private_hosts():
        full_cores, part_cores = host.get_free_cores()
        assert len(full_cores) == 4
        assert len(part_cores) == 0
        assert len(host.containers.all()) == 0
        assert host.count == 4

    # Case 2: with fragment cores (nshare=4).
    # Acquire cores.
    host_cores = centralized_schedule(p, 3, 3, 4)
    containers = []
    for (host, count), cores in host_cores.iteritems():
        cores_per_container = len(cores['full']) / count
        host.occupy_cores(cores, 4)
        for i in range(count):
            cid = random_sha1()
            used_cores = {
                'full': cores['full'][i * cores_per_container:(i + 1) * cores_per_container],
                'part': cores['part']
            }
            # not using a port
            c = Container.create(cid, host, v, random_string(), 'entrypoint', used_cores, 'env', nshare=4)
            assert c is not None
            containers.append(c)

    # 3 full + 4/10 of the fourth core used: remainder 6/10 on one part core.
    for host in p.get_private_hosts():
        full_cores, part_cores = host.get_free_cores()
        assert len(full_cores) == 0
        assert len(part_cores) == 1
        assert part_cores[0].remain == 6
        assert len(host.containers.all()) == 1
        assert host.count == D('0.6')

    assert len(containers) == 3
    assert len(v.containers.all()) == 3

    for c in containers:
        assert c.host_id in host_ids1
        assert c.host_id not in host_ids2
        assert c.app.id == a.id
        assert c.version.id == v.id
        assert c.is_alive
        assert len(c.full_cores) == 3
        assert len(c.part_cores) == 1
        # Used + free core labels must account for every core on the host.
        all_core_labels = sorted([
            '0', '1', '2', '3',
        ])
        used_full_core_labels = [core.label for core in c.full_cores]
        used_part_core_labels = [core.label for core in c.part_cores]
        free_core_labels = [core.label for core in c.host.get_free_cores()[0]]
        assert all_core_labels == sorted(used_full_core_labels + used_part_core_labels + free_core_labels)

    # Release cores.
    for c in containers:
        c.delete()
    assert len(v.containers.all()) == 0
    assert get_max_container_count(p, 3, 0) == 3
    host_cores = centralized_schedule(p, 3, 3, 0)
    assert len(host_cores) == 3
    for host in p.get_private_hosts():
        full_cores, part_cores = host.get_free_cores()
        assert len(full_cores) == 4
        assert len(host.containers.all()) == 0
        assert host.count == 4

    # Case 3: 1 full core + 5/10 fragment slice per container.
    # Acquire.
    host_cores = centralized_schedule(p, 6, 1, 5)
    containers = []
    for (host, count), cores in host_cores.iteritems():
        cores_per_container = len(cores['full']) / count
        for i in range(count):
            cid = random_sha1()
            used_cores = {
                'full': cores['full'][i * cores_per_container:(i + 1) * cores_per_container],
                'part': cores['part'][i:i + 1],
            }
            host.occupy_cores(used_cores, 5)
            # not using a port
            c = Container.create(cid, host, v, random_string(), 'entrypoint', used_cores, 'env', nshare=5)
            assert c is not None
            containers.append(c)

    # Two containers per host: 2 full + 2x5/10 = 3 cores used, 1 free.
    for host in p.get_private_hosts():
        full_cores, part_cores = host.get_free_cores()
        assert len(full_cores) == 1
        assert len(part_cores) == 0
        assert len(host.containers.all()) == 2
        assert host.count == D('1')

    assert len(containers) == 6
    assert len(v.containers.all()) == 6
    for c in containers:
        assert c.host_id in host_ids1
        assert c.host_id not in host_ids2
        assert c.app.id == a.id
        assert c.version.id == v.id
        assert c.is_alive
        assert len(c.full_cores) == 1
        assert len(c.part_cores) == 1

    ## Release cores.
    for c in containers:
        c.delete()
    assert len(v.containers.all()) == 0
    assert get_max_container_count(p, 3, 0) == 3
    host_cores = centralized_schedule(p, 3, 3, 0)
    assert len(host_cores) == 3
    for host in p.get_private_hosts():
        full_cores, part_cores = host.get_free_cores()
        assert len(full_cores) == 4
        assert len(part_cores) == 0
        assert len(host.containers.all()) == 0
        assert host.count == 4
def test_host(test_db):
    """Host assignment and scheduling over a group's private hosts.

    Builds a pod of six 4-core hosts, assigns three to the group, and checks
    capacity and centralized schedules land only on those three hosts.
    """
    g = Group.create('group', 'group')
    p = Pod.create('pod', 'pod', 10, -1)
    assert p.assigned_to_group(g)
    hosts = [Host.create(p, random_ipv4(), random_string(prefix='host'),
                         random_uuid(), 4, 4096) for i in range(6)]
    for host in hosts:
        assert host is not None
        assert len(host.cores) == 4
        full_cores, part_cores = host.get_free_cores()
        assert len(full_cores) == 4
        assert len(part_cores) == 0

    # No host assigned to the group yet: no capacity, nothing schedules.
    assert len(g.private_hosts.all()) == 0
    assert get_max_container_count(g, p, 1, 0) == 0
    assert centralized_schedule(g, p, 1, 1, 0) == {}

    for host in hosts[:3]:
        host.assigned_to_group(g)
    host_ids1 = {h.id for h in hosts[:3]}
    host_ids2 = {h.id for h in hosts[3:]}
    assert len(g.private_hosts.all()) == 3

    # 3 private hosts x 4 cores = 12 single-core containers.
    assert get_max_container_count(g, p, 1, 0) == 12
    host_cores = centralized_schedule(g, p, 12, 1, 0)
    assert len(host_cores) == 3
    for (host, count), cores in host_cores.iteritems():
        assert host.id in host_ids1
        assert host.id not in host_ids2
        assert count == 4
        assert len(cores['full']) == 4

    # 3-core containers: one per host.
    assert get_max_container_count(g, p, 3, 0) == 3
    host_cores = centralized_schedule(g, p, 3, 3, 0)
    assert len(host_cores) == 3
    for (host, count), cores in host_cores.iteritems():
        assert host.id in host_ids1
        assert host.id not in host_ids2
        assert count == 1
        assert len(cores['full']) == 3

    # 2-core containers: centralized packing fills 2 hosts for 4 containers.
    assert get_max_container_count(g, p, 2, 0) == 6
    host_cores = centralized_schedule(g, p, 4, 2, 0)
    assert len(host_cores) == 2
    for (host, count), cores in host_cores.iteritems():
        assert host.id in host_ids1
        assert host.id not in host_ids2
        assert count == 2
        assert len(cores['full']) == 4

    # 1 full core + 1/10 fragment per container: 3 fit on one host.
    assert get_max_container_count(g, p, 1, 1) == 9
    host_cores = centralized_schedule(g, p, 3, 1, 1)
    assert len(host_cores) == 1
    for (host, count), cores in host_cores.iteritems():
        assert host.id in host_ids1
        assert host.id not in host_ids2
        assert count == 3
        assert len(cores['full']) == 3
        assert len(cores['part']) == 3

    # 2 full cores + 3/10 fragment per container: one per host.
    assert get_max_container_count(g, p, 2, 3) == 3
    host_cores = centralized_schedule(g, p, 3, 2, 3)
    assert len(host_cores) == 3
    for (host, count), cores in host_cores.iteritems():
        assert host.id in host_ids1
        assert host.id not in host_ids2
        assert count == 1
        assert len(cores['full']) == 2
        assert len(cores['part']) == 1
def test_container(test_db):
    """Container lifecycle (group-aware variant): schedule cores, create
    containers, verify host accounting (full and fragment cores), then
    delete and verify release.

    Covers three scenarios: whole cores only, whole + shared fragment cores
    (nshare=4), and single core + per-container fragment slice (nshare=5).
    """
    a = App.get_or_create('app', 'http://git.hunantv.com/group/app.git', '')
    assert a is not None
    assert a.id == a.user_id
    v = a.add_version(random_sha1())
    assert v is not None
    assert v.app.id == a.id
    assert v.name == a.name
    assert len(v.containers.all()) == 0
    assert len(v.tasks.all()) == 0

    g = Group.create('group', 'group')
    p = Pod.create('pod', 'pod', 10, -1)
    assert p.assigned_to_group(g)
    hosts = [Host.create(p, random_ipv4(), random_string(prefix='host'),
                         random_uuid(), 4, 4096) for i in range(6)]
    # Only the first three hosts belong to the group and are schedulable.
    for host in hosts[:3]:
        host.assigned_to_group(g)
    host_ids1 = {h.id for h in hosts[:3]}
    host_ids2 = {h.id for h in hosts[3:]}

    host_cores = centralized_schedule(g, p, 3, 3, 0)

    # Case 1: no fragment cores.
    # Acquire cores.
    containers = []
    for (host, count), cores in host_cores.iteritems():
        host.occupy_cores(cores, 0)
        cores_per_container = len(cores['full']) / count
        for i in range(count):
            cid = random_sha1()
            used_cores = {'full': cores['full'][i*cores_per_container:(i+1)*cores_per_container]}
            c = Container.create(cid, host, v, random_string(), 'entrypoint', used_cores, 'env', nshare=0)
            assert c is not None
            containers.append(c)

    # Each private host now runs one 3-core container, leaving 1 core free.
    for host in g.private_hosts.all():
        full_cores, part_cores = host.get_free_cores()
        assert len(full_cores) == 1
        assert len(part_cores) == 0
        assert len(host.containers.all()) == 1
        assert host.count == 1

    assert len(containers) == 3
    assert len(v.containers.all()) == 3

    for c in containers:
        assert c.host_id in host_ids1
        assert c.host_id not in host_ids2
        assert c.app.id == a.id
        assert c.version.id == v.id
        assert c.is_alive
        assert len(c.full_cores) == 3
        assert len(c.part_cores) == 0
        # Used + free core labels must account for every core on the host.
        all_core_labels = sorted(['0', '1', '2', '3', ])
        used_full_core_labels = [core.label for core in c.full_cores]
        used_part_core_labels = [core.label for core in c.part_cores]
        free_core_labels = [core.label for core in c.host.get_free_cores()[0]]
        assert all_core_labels == sorted(used_full_core_labels + used_part_core_labels + free_core_labels)

    # Release cores.
    for c in containers:
        c.delete()
    assert len(v.containers.all()) == 0
    assert get_max_container_count(g, p, 3, 0) == 3
    host_cores = centralized_schedule(g, p, 3, 3, 0)
    assert len(host_cores) == 3
    for host in g.private_hosts.all():
        full_cores, part_cores = host.get_free_cores()
        assert len(full_cores) == 4
        assert len(part_cores) == 0
        assert len(host.containers.all()) == 0
        assert host.count == 4

    # Case 2: with fragment cores (nshare=4).
    # Acquire cores.
    host_cores = centralized_schedule(g, p, 3, 3, 4)
    containers = []
    for (host, count), cores in host_cores.iteritems():
        cores_per_container = len(cores['full']) / count
        host.occupy_cores(cores, 4)
        for i in range(count):
            cid = random_sha1()
            used_cores = {'full': cores['full'][i*cores_per_container:(i+1)*cores_per_container], 'part': cores['part']}
            # not using a port
            c = Container.create(cid, host, v, random_string(), 'entrypoint', used_cores, 'env', nshare=4)
            assert c is not None
            containers.append(c)

    # 3 full + 4/10 of the fourth core used: remainder 6/10 on one part core.
    for host in g.private_hosts.all():
        full_cores, part_cores = host.get_free_cores()
        assert len(full_cores) == 0
        assert len(part_cores) == 1
        assert part_cores[0].remain == 6
        assert len(host.containers.all()) == 1
        assert host.count == D('0.6')

    assert len(containers) == 3
    assert len(v.containers.all()) == 3

    for c in containers:
        assert c.host_id in host_ids1
        assert c.host_id not in host_ids2
        assert c.app.id == a.id
        assert c.version.id == v.id
        assert c.is_alive
        assert len(c.full_cores) == 3
        assert len(c.part_cores) == 1
        # Used + free core labels must account for every core on the host.
        all_core_labels = sorted(['0', '1', '2', '3', ])
        used_full_core_labels = [core.label for core in c.full_cores]
        used_part_core_labels = [core.label for core in c.part_cores]
        free_core_labels = [core.label for core in c.host.get_free_cores()[0]]
        assert all_core_labels == sorted(used_full_core_labels + used_part_core_labels + free_core_labels)

    # Release cores.
    for c in containers:
        c.delete()
    assert len(v.containers.all()) == 0
    assert get_max_container_count(g, p, 3, 0) == 3
    host_cores = centralized_schedule(g, p, 3, 3, 0)
    assert len(host_cores) == 3
    for host in g.private_hosts.all():
        full_cores, part_cores = host.get_free_cores()
        assert len(full_cores) == 4
        assert len(host.containers.all()) == 0
        assert host.count == 4

    # Case 3: 1 full core + 5/10 fragment slice per container.
    # Acquire.
    host_cores = centralized_schedule(g, p, 6, 1, 5)
    containers = []
    for (host, count), cores in host_cores.iteritems():
        cores_per_container = len(cores['full']) / count
        for i in range(count):
            cid = random_sha1()
            used_cores = {
                'full': cores['full'][i*cores_per_container:(i+1)*cores_per_container],
                'part': cores['part'][i:i+1],
            }
            host.occupy_cores(used_cores, 5)
            # not using a port
            c = Container.create(cid, host, v, random_string(), 'entrypoint', used_cores, 'env', nshare=5)
            assert c is not None
            containers.append(c)

    # Two containers per host: 2 full + 2x5/10 = 3 cores used, 1 free.
    for host in g.private_hosts.all():
        full_cores, part_cores = host.get_free_cores()
        assert len(full_cores) == 1
        assert len(part_cores) == 0
        assert len(host.containers.all()) == 2
        assert host.count == D('1')

    assert len(containers) == 6
    assert len(v.containers.all()) == 6
    for c in containers:
        assert c.host_id in host_ids1
        assert c.host_id not in host_ids2
        assert c.app.id == a.id
        assert c.version.id == v.id
        assert c.is_alive
        assert len(c.full_cores) == 1
        assert len(c.part_cores) == 1

    ## Release cores.
    for c in containers:
        c.delete()
    assert len(v.containers.all()) == 0
    assert get_max_container_count(g, p, 3, 0) == 3
    host_cores = centralized_schedule(g, p, 3, 3, 0)
    assert len(host_cores) == 3
    for host in g.private_hosts.all():
        full_cores, part_cores = host.get_free_cores()
        assert len(full_cores) == 4
        assert len(part_cores) == 0
        assert len(host.containers.all()) == 0
        assert host.count == 4
def test_scheduler(test_db):
    """Scheduler behavior and rough timing at scale: capacity calculation,
    then average_schedule (spread) and centralized_schedule (pack) plans.

    Timing is printed/logged only; no timing assertions are made.
    """
    # 10000 hosts with 16 cores each; no limit on core sharing.
    pod = _create_data(10, -1, 10000)

    def test_max_container_count(ncore, nshare, expected):
        # Assert capacity and log how long the computation took.
        start = time.time()
        assert get_max_container_count(pod, ncore, nshare) == expected
        delta = time.time() - start
        _log.debug('test_max_container_count with ncore={}, nshare={}, expected={} takes {}'.format(ncore, nshare, expected, delta))

    # (ncore, nshare, expected max container count) over 160000 total cores.
    test_max_container_count_cases = (
        (1, 0, 160000),
        (2, 0, 80000),
        (3, 0, 50000),
        (4, 0, 40000),
        (5, 0, 30000),
        (1, 5, 100000),
        (2, 5, 60000),
        (3, 5, 40000),
        (1, 1, 140000),
        (2, 1, 70000),
    )
    for case in test_max_container_count_cases:
        test_max_container_count(*case)

    # average_schedule spreads containers: one host per container here.
    start = time.time()
    assert len(average_schedule(pod, ncontainer=100, ncore=2)) == 100
    print time.time() - start
    start = time.time()
    assert len(average_schedule(pod, ncontainer=130, ncore=0, nshare=5)) == 130
    print time.time() - start

    start = time.time()
    r = average_schedule(pod, ncontainer=10, ncore=1)
    print time.time() - start
    assert len(r) == 10
    assert sum(i[1] for i in r.keys()) == 10
    for (host, count), cores in r.iteritems():
        assert len(cores['full']) == 1
        assert len(cores['part']) == 0

    start = time.time()
    r = average_schedule(pod, ncontainer=10000, ncore=2)
    print time.time() - start
    assert len(r) == 10000
    assert sum(i[1] for i in r.keys()) == 10000
    for (host, count), cores in r.iteritems():
        assert count == 1
        assert len(cores['full']) == 2
        assert len(cores['part']) == 0

    start = time.time()
    r = average_schedule(pod, ncontainer=10000, ncore=1, nshare=5)
    print time.time() - start
    assert len(r) == 10000
    assert sum(i[1] for i in r.keys()) == 10000
    for (host, count), cores in r.iteritems():
        assert count == 1
        assert len(cores['full']) == 1
        assert len(cores['part']) == 1

    start = time.time()
    r = average_schedule(pod, ncontainer=10000, ncore=2, nshare=3)
    print time.time() - start
    assert len(r) == 10000
    assert sum(i[1] for i in r.keys()) == 10000
    for (host, count), cores in r.iteritems():
        assert count == 1
        assert len(cores['full']) == 2
        assert len(cores['part']) == 1

    # centralized_schedule packs containers onto as few hosts as possible.
    start = time.time()
    assert len(centralized_schedule(pod, ncontainer=100, ncore=2)) == 13
    print time.time() - start
    start = time.time()
    assert len(centralized_schedule(pod, ncontainer=130, ncore=0, nshare=5)) == 5
    print time.time() - start

    # 10000 single-core containers pack into 625 full 16-core hosts.
    start = time.time()
    r = centralized_schedule(pod, ncontainer=10000, ncore=1)
    print time.time() - start
    assert len(r) == 625
    assert sum(i[1] for i in r.keys()) == 10000
    for (host, count), cores in r.iteritems():
        assert count == 16
        assert len(cores['full']) == 16
        assert len(cores['part']) == 0

    # 100 x 2-core containers: 12 full hosts (8 each) + 1 host with 4.
    start = time.time()
    r = centralized_schedule(pod, ncontainer=100, ncore=2)
    print time.time() - start
    assert len(r) == 13
    assert sum(i[1] for i in r.keys()) == 100
    for (host, count), cores in r.iteritems():
        assert count in (4, 8)
        if count == 4:
            assert len(cores['full']) == 8
            assert len(cores['part']) == 0
        if count == 8:
            assert len(cores['full']) == 16
            assert len(cores['part']) == 0

    # 1 full core + 5/10 fragment each: 10 containers per host over 3 hosts.
    start = time.time()
    r = centralized_schedule(pod, ncontainer=30, ncore=1, nshare=5)
    print time.time() - start
    assert len(r) == 3
    assert sum(i[1] for i in r.keys()) == 30
    for (host, count), cores in r.iteritems():
        assert count == 10
        assert len(cores['full']) == 10
        assert len(cores['part']) == 10
        # 10 part entries but only 5 distinct shared cores (2 containers each).
        assert len(set(cores['part'])) == 5

    # Mixed full + fragment demand spread over 4 hosts.
    start = time.time()
    r = centralized_schedule(pod, ncontainer=20, ncore=2, nshare=3)
    print time.time() - start
    assert len(r) == 4
    assert sum(i[1] for i in r.keys()) == 20
    for (host, count), cores in r.iteritems():
        assert count in (2, 6)
        if count == 2:
            assert len(cores['full']) == 4
            assert len(cores['part']) == 2
            assert len(set(cores['part'])) == 1
        if count == 6:
            assert len(cores['full']) == 12
            assert len(cores['part']) == 6
            assert len(set(cores['part'])) == 2