Example #1
def test_container_release_cores(test_db):
    a = App.get_or_create('app', 'http://git.hunantv.com/group/app.git')
    v = a.add_version(random_sha1())
    p = Pod.create('pod', 'pod', 10, -1)
    host = Host.create(p, random_ipv4(), random_string(), random_uuid(), 200, 0)

    for core in host.cores:
        assert core.host_id == host.id
        assert core.remain == 10

    containers = []
    
    cores = sorted(host.cores, key=operator.attrgetter('label'))
    for fcores, pcores in zip(chunked(cores[:100], 10), chunked(cores[100:], 10)):
        used_cores = {'full': fcores, 'part': pcores}
        host.occupy_cores(used_cores, 5)
        c = Container.create(random_sha1(), host, v, random_string(), 'entrypoint', used_cores, 'env', nshare=5)
        containers.append(c)

    cores = sorted(host.cores, key=operator.attrgetter('label'))
    for fcores, pcores in zip(chunked(cores[:100], 10), chunked(cores[100:], 10)):
        for core in fcores:
            assert core.remain == 0
        for core in pcores:
            assert core.remain == 5

    for c in containers:
        c.delete()

    cores = sorted(host.cores, key=operator.attrgetter('label'))
    for fcores, pcores in zip(chunked(cores[:100], 10), chunked(cores[100:], 10)):
        for core in fcores:
            assert core.remain == 10
        for core in pcores:
            assert core.remain == 10
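
These tests lean on fixture helpers such as random_sha1, random_ipv4, random_uuid and random_string from the project's test utilities (prepare.py in some later examples). A minimal sketch of what they could look like is given below so the snippets read standalone; only the names and call signatures come from the tests, the bodies are assumptions.

import hashlib
import os
import random
import uuid


def random_sha1():
    # hex digest shaped like a git sha1 / docker container id (assumed)
    return hashlib.sha1(os.urandom(20)).hexdigest()


def random_ipv4():
    return '.'.join(str(random.randint(1, 254)) for _ in range(4))


def random_uuid():
    return str(uuid.uuid4())


def random_string(prefix='', length=8):
    # random identifier with an optional prefix, e.g. random_string(prefix='host')
    return prefix + uuid.uuid4().hex[:length]
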
Example #2
def test_container_transform(test_db):
    a = App.get_or_create('app', 'http://git.hunantv.com/group/app.git', '')
    assert a is not None

    v = a.add_version(random_sha1())
    v2 = a.add_version(random_sha1())
    assert v is not None
    assert v.app.id == a.id
    assert v.name == a.name
    assert len(v.containers.all()) == 0
    assert len(v.tasks.all()) == 0

    g = Group.create('group', 'group')
    p = Pod.create('pod', 'pod')
    assert p.assigned_to_group(g)
    hosts = [Host.create(p, random_ipv4(), random_string(prefix='host'),
        random_uuid(), 4, 4096) for i in range(6)]

    for host in hosts[:3]:
        host.assigned_to_group(g)

    assert g.get_max_containers(p, 3, 0) == 3
    host_cores = g.get_free_cores(p, 3, 3, 0)
    assert len(host_cores) == 3

    containers = []
    for (host, count), cores in host_cores.iteritems():
        cores_per_container = len(cores['full']) / count
        for i in range(count):
            cid = random_sha1()
            used_cores = {'full': cores['full'][i*cores_per_container:(i+1)*cores_per_container]}
            c = Container.create(cid, host, v, random_string(), 'entrypoint', used_cores, 'env')
            assert c is not None
            containers.append(c)
        host.occupy_cores(cores, 0)

    for host in g.private_hosts.all():
        assert len(host.get_free_cores()[0]) == 1
        assert len(host.containers.all()) == 1
        assert host.count == 1

    assert len(containers) == 3
    assert len(v.containers.all()) == 3

    cids = [c.container_id for c in containers]
    for c in containers:
        host = c.host
        cid = c.container_id
        c.transform(v2, random_sha1(), random_string())
        assert c.container_id != cid

    new_cids = [c.container_id for c in containers]
    assert new_cids != cids
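
Here get_free_cores appears to return a mapping from (host, count) pairs to the cores scheduled on that host, and the loop slices the 'full' list into equal chunks, one per container. A small self-contained illustration of that slicing, with plain strings standing in for Core objects (the mapping shape is inferred from how the tests index it):

host_cores = {('host-1', 3): {'full': ['c%d' % i for i in range(9)], 'part': []}}

for (host, count), cores in host_cores.items():
    cores_per_container = len(cores['full']) // count          # 9 // 3 == 3
    per_container = [
        cores['full'][i * cores_per_container:(i + 1) * cores_per_container]
        for i in range(count)
    ]
    assert per_container == [['c0', 'c1', 'c2'],
                             ['c3', 'c4', 'c5'],
                             ['c6', 'c7', 'c8']]
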
Example #3
def test_container_release_cores(test_db):
    a = App.get_or_create('app', 'http://git.hunantv.com/group/app.git')
    v = a.add_version(random_sha1())
    p = Pod.create('pod', 'pod', 10, -1)
    host = Host.create(p, random_ipv4(), random_string(), random_uuid(), 200,
                       0)

    for core in host.cores:
        assert core.host_id == host.id
        assert core.remain == 10

    containers = []

    cores = sorted(host.cores, key=operator.attrgetter('label'))
    for fcores, pcores in zip(chunked(cores[:100], 10),
                              chunked(cores[100:], 10)):
        used_cores = {'full': fcores, 'part': pcores}
        host.occupy_cores(used_cores, 5)
        c = Container.create(random_sha1(),
                             host,
                             v,
                             random_string(),
                             'entrypoint',
                             used_cores,
                             'env',
                             nshare=5)
        containers.append(c)

    cores = sorted(host.cores, key=operator.attrgetter('label'))
    for fcores, pcores in zip(chunked(cores[:100], 10),
                              chunked(cores[100:], 10)):
        for core in fcores:
            assert core.remain == 0
        for core in pcores:
            assert core.remain == 5

    for c in containers:
        c.delete()

    cores = sorted(host.cores, key=operator.attrgetter('label'))
    for fcores, pcores in zip(chunked(cores[:100], 10),
                              chunked(cores[100:], 10)):
        for core in fcores:
            assert core.remain == 10
        for core in pcores:
            assert core.remain == 10
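
Both versions of test_container_release_cores check the same bookkeeping: every core starts with remain equal to the pod's core share (presumably the 10 passed to Pod.create here), a fully occupied core drops to 0, a partially occupied core drops by nshare, and deleting the container restores the shares. A toy illustration of that arithmetic (the real Core is a database model, this is not it):

class ToyCore(object):
    def __init__(self, core_share=10):
        self.remain = core_share

    def occupy(self, nshare):
        self.remain -= nshare

    def release(self, nshare):
        self.remain += nshare


full_core, part_core = ToyCore(), ToyCore()
full_core.occupy(10)                 # a 'full' core is taken entirely
part_core.occupy(5)                  # a 'part' core gives up nshare=5
assert (full_core.remain, part_core.remain) == (0, 5)

full_core.release(10)                # Container.delete() gives everything back
part_core.release(5)
assert (full_core.remain, part_core.remain) == (10, 10)
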
Example #4
def create_test_suite():
    appyaml = {
        'appname': 'app',
        'entrypoints': {
            'web': {
                'cmd': 'python app.py',
                'ports': ['5000/tcp'],
            },
            'daemon': {
                'cmd': 'python daemon.py',
            },
            'service': {
                'cmd': 'python service.py'
            },
        },
        'build': 'pip install -r ./requirements.txt',
    }
    app = App.get_or_create('app', 'http://git.hunantv.com/group/app.git')
    version = app.add_version(random_sha1())
    appconfig = version.appconfig
    appconfig.update(**appyaml)
    appconfig.save()

    pod = Pod.create('pod', 'pod')

    hosts = [
        Host.create(pod, random_ipv4(), random_string(prefix='host'),
                    random_uuid(), 4, 4096) for i in range(4)
    ]

    containers = []
    for (host, count), cores in centralized_schedule(pod, 4, 4, 0).iteritems():
        cores_per_container = len(cores['full']) / count
        for i in range(count):
            cid = random_sha1()
            used_cores = {
                'full':
                cores['full'][i * cores_per_container:(i + 1) *
                              cores_per_container]
            }
            c = Container.create(cid, host, version, random_string(), 'web',
                                 used_cores, 'env')
            containers.append(c)
        host.occupy_cores(cores, 0)
    return app, version, pod, hosts, containers
Example #5
File: prepare.py Project: lcsandy/eru-core
def create_test_suite():
    appyaml = {
        'appname': 'app',
        'entrypoints': {
            'web': {
                'cmd': 'python app.py',
                'ports': ['5000/tcp'],
            },
            'daemon': {
                'cmd': 'python daemon.py',
            },
            'service': {
                'cmd': 'python service.py'
            },
        },
        'build': 'pip install -r ./requirements.txt',
    }
    app = App.get_or_create('app', 'http://git.hunantv.com/group/app.git', 'token')
    version = app.add_version(random_sha1())
    appconfig = version.appconfig
    appconfig.update(**appyaml)
    appconfig.save()

    group = Group.create('group', 'group')
    pod = Pod.create('pod', 'pod')
    pod.assigned_to_group(group)

    hosts = [Host.create(pod, random_ipv4(), random_string(prefix='host'),
        random_uuid(), 4, 4096) for i in range(4)]

    for host in hosts:
        host.assigned_to_group(group)

    containers = []
    for (host, count), cores in group.get_free_cores(pod, 4, 4, 0).iteritems():
        cores_per_container = len(cores['full']) / count
        for i in range(count):
            cid = random_sha1()
            used_cores = cores['full'][i*cores_per_container:(i+1)*cores_per_container]
            c = Container.create(cid, host, version, random_string(), 'entrypoint', used_cores, 'env')
            containers.append(c)
        host.occupy_cores(cores, 0)
    return app, version, group, pod, hosts, containers
Example #6
def test_container(test_db):
    a = App.get_or_create('app', 'http://git.hunantv.com/group/app.git', '')
    assert a is not None
    assert a.id == a.user_id

    v = a.add_version(random_sha1())
    assert v is not None
    assert v.app.id == a.id
    assert v.name == a.name
    assert len(v.containers.all()) == 0
    assert len(v.tasks.all()) == 0

    g = Group.create('group', 'group')
    p = Pod.create('pod', 'pod', 10, -1)
    assert p.assigned_to_group(g)
    hosts = [Host.create(p, random_ipv4(), random_string(prefix='host'),
        random_uuid(), 4, 4096) for i in range(6)]

    for host in hosts[:3]:
        host.assigned_to_group(g)
    host_ids1 = {h.id for h in hosts[:3]}
    host_ids2 = {h.id for h in hosts[3:]}

    host_cores = g.get_free_cores(p, 3, 3, 0)

    # Test the case with no fragmented (part) cores
    # Acquire cores
    containers = []
    for (host, count), cores in host_cores.iteritems():
        cores_per_container = len(cores['full']) / count
        for i in range(count):
            cid = random_sha1()
            used_cores = {'full': cores['full'][i*cores_per_container:(i+1)*cores_per_container]}
            # not using a port
            c = Container.create(cid, host, v, random_string(), 'entrypoint', used_cores, 'env')
            assert c is not None
            containers.append(c)
        host.occupy_cores(cores, 0)

    for host in g.private_hosts.all():
        full_cores, part_cores = host.get_free_cores()
        assert len(full_cores) == 1
        assert len(part_cores) == 0
        assert len(host.containers.all()) == 1
        assert host.count == 1

    assert len(containers) == 3
    assert len(v.containers.all()) == 3

    for c in containers:
        assert c.host_id in host_ids1
        assert c.host_id not in host_ids2
        assert c.app.id == a.id
        assert c.version.id == v.id
        assert c.is_alive
        assert len(c.full_cores.all()) == 3
        assert len(c.part_cores.all()) == 0
        all_core_labels = sorted(['0', '1', '2', '3', ])
        used_full_core_labels = [core.label for core in c.full_cores.all()]
        used_part_core_labels = [core.label for core in c.part_cores.all()]
        free_core_labels = [core.label for core in c.host.get_free_cores()[0]]
        assert all_core_labels == sorted(used_full_core_labels + used_part_core_labels + free_core_labels)

    # Release cores
    for c in containers:
        c.delete()

    assert len(v.containers.all()) == 0
    assert g.get_max_containers(p, 3, 0) == 3
    host_cores = g.get_free_cores(p, 3, 3, 0)
    assert len(host_cores) == 3

    for host in g.private_hosts.all():
        full_cores, part_cores = host.get_free_cores()
        assert len(full_cores) == 4
        assert len(part_cores) == 0
        assert len(host.containers.all()) == 0
        assert host.count == 0

    # Test the case with fragmented (part) cores
    # Acquire cores
    host_cores = g.get_free_cores(p, 3, 3, 4)
    containers = []
    for (host, count), cores in host_cores.iteritems():
        cores_per_container = len(cores['full']) / count
        for i in range(count):
            cid = random_sha1()
            used_cores = {'full':  cores['full'][i*cores_per_container:(i+1)*cores_per_container],
                    'part': cores['part']}
            # not using a port
            c = Container.create(cid, host, v, random_string(), 'entrypoint', used_cores, 'env')
            assert c is not None
            containers.append(c)
        host.occupy_cores(cores, 4)

    for host in g.private_hosts.all():
        full_cores, part_cores = host.get_free_cores()
        assert len(full_cores) == 0
        assert len(part_cores) == 1
        assert part_cores[0].used == 4
        assert len(host.containers.all()) == 1
        assert host.count == 1

    assert len(containers) == 3
    assert len(v.containers.all()) == 3

    for c in containers:
        assert c.host_id in host_ids1
        assert c.host_id not in host_ids2
        assert c.app.id == a.id
        assert c.version.id == v.id
        assert c.is_alive
        assert len(c.full_cores.all()) == 3
        assert len(c.part_cores.all()) == 1
        all_core_labels = sorted(['0', '1', '2', '3', ])
        used_full_core_labels = [core.label for core in c.full_cores.all()]
        used_part_core_labels = [core.label for core in c.part_cores.all()]
        free_core_labels = [core.label for core in c.host.get_free_cores()[0]]
        assert all_core_labels == sorted(used_full_core_labels + used_part_core_labels + free_core_labels)


    # Release cores
    for c in containers:
        c.delete(4)

    assert len(v.containers.all()) == 0
    assert g.get_max_containers(p, 3, 0) == 3
    host_cores = g.get_free_cores(p, 3, 3, 0)
    assert len(host_cores) == 3

    for host in g.private_hosts.all():
        full_cores, part_cores = host.get_free_cores()
        assert len(full_cores) == 4
        assert len(host.containers.all()) == 0
        assert host.count == 0

    # Acquire cores
    host_cores = g.get_free_cores(p, 6, 1, 5)
    containers = []
    for (host, count), cores in host_cores.iteritems():
        cores_per_container = len(cores['full']) / count
        for i in range(count):
            cid = random_sha1()
            used_cores = {'full':  cores['full'][i*cores_per_container:(i+1)*cores_per_container],
                    'part': cores['part'][i:i+1]}
            # not using a port
            c = Container.create(cid, host, v, random_string(), 'entrypoint', used_cores, 'env')
            assert c is not None
            containers.append(c)
            host.occupy_cores(used_cores, 5)

    for host in g.private_hosts.all():
        full_cores, part_cores = host.get_free_cores()
        assert len(full_cores) == 1
        assert len(part_cores) == 0
        assert len(host.containers.all()) == 2
        assert host.count == 2

    assert len(containers) == 6
    assert len(v.containers.all()) == 6

    for c in containers:
        assert c.host_id in host_ids1
        assert c.host_id not in host_ids2
        assert c.app.id == a.id
        assert c.version.id == v.id
        assert c.is_alive
        assert len(c.full_cores.all()) == 1
        assert len(c.part_cores.all()) == 1

    # Release cores
    for c in containers:
        c.delete(5)

    assert len(v.containers.all()) == 0
    assert g.get_max_containers(p, 3, 0) == 3
    host_cores = g.get_free_cores(p, 3, 3, 0)
    assert len(host_cores) == 3

    for host in g.private_hosts.all():
        full_cores, part_cores = host.get_free_cores()
        assert len(full_cores) == 4
        assert len(part_cores) == 0
        assert len(host.containers.all()) == 0
        assert host.count == 0
Example #7
File: task.py Project: marswon/eru-core
def create_containers_with_macvlan(task_id, ncontainer, core_ids, network_ids):
    """
    Run the task identified by task_id: deploy ncontainer containers,
    occupy the cores given by core_ids, and bind them to the subnets in networks.
    """
    task = Task.get(task_id)
    if not task:
        return

    networks = Network.get_multi(network_ids)

    notifier = TaskNotifier(task)
    host = task.host
    version = task.version
    entrypoint = task.props['entrypoint']
    env = task.props['env']
    used_cores = Core.get_multi(core_ids)

    pub_agent_vlan_key = 'eru:agent:%s:vlan' % host.name
    feedback_key = 'eru:agent:%s:feedback' % task_id

    cids = []

    for cores in more_itertools.chunked(used_cores, len(core_ids)/ncontainer):
        try:
            cid, cname = dockerjob.create_one_container(host, version,
                    entrypoint, env, cores)
        except Exception:
            host.release_cores(cores)
            continue

        ips = [n.acquire_ip() for n in networks]
        ip_dict = {ip.vlan_address: ip for ip in ips}

        if ips:
            ident_id = cname.split('_')[-1]
            values = [str(task_id), cid, ident_id] + ['{0}:{1}'.format(ip.vlan_seq_id, ip.vlan_address) for ip in ips]
            rds.publish(pub_agent_vlan_key, '|'.join(values))

        for _ in ips:
            # timeout 15s
            rv = rds.blpop(feedback_key, 15)
            if rv is None:
                break
            # rv is like (feedback_key, 'succ|container_id|vethname|ip')
            succ, _, vethname, vlan_address = rv[1].split('|')
            if succ == '0':
                break
            ip = ip_dict.get(vlan_address, None)
            if ip:
                ip.set_vethname(vethname)

        else:
            logger.info('Creating container with cid %s and ips %s' % (cid, ips))
            c = Container.create(cid, host, version, cname, entrypoint, cores, env)
            for ip in ips:
                ip.assigned_to_container(c)
            notifier.notify_agent(cid)
            add_container_for_agent(c)
            add_container_backends(c)
            cids.append(cid)
            # skip the cleanup below
            continue

        # Clean up the failed container: remove it, release its cores and IPs
        logger.info('Cleaning failed container with cid %s' % cid)
        dockerjob.remove_container_by_cid([cid], host)
        host.release_cores(cores)
        [ip.release() for ip in ips]

    publish_to_service_discovery(version.name)
    task.finish_with_result(code.TASK_SUCCESS, container_ids=cids)
    notifier.pub_success()
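
The for ... else over ips above carries the control flow: the else branch runs only when the loop finishes without a break, i.e. every feedback message arrived within the timeout and reported success, while any break falls through to the cleanup code after the else. A standalone illustration of the pattern (the helper and payloads below are made up for the demo):

def all_feedback_ok(feedbacks):
    for fb in feedbacks:
        if fb is None or fb.startswith('0|'):
            break                        # timeout or an explicit failure
    else:
        return True                      # loop never broke: every reply was good
    return False                         # reached only via break


assert all_feedback_ok(['1|cid|veth0|10.0.0.2', '1|cid|veth1|10.0.1.2'])
assert not all_feedback_ok(['1|cid|veth0|10.0.0.2', None])
assert not all_feedback_ok(['0|cid||'])
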
Example #8
File: task.py Project: BlueKarl/eru-core
def create_containers_with_macvlan_public(task_id, ncontainer, nshare, network_ids, spec_ips=None):
    """
    Run the task identified by task_id: deploy ncontainer containers and
    bind them to the subnets in networks.
    """
    current_flask.logger.info('Task<id=%s>: Started', task_id)
    task = Task.get(task_id)
    if not task:
        current_flask.logger.error('Task (id=%s) not found, quit', task_id)
        return

    if spec_ips is None:
        spec_ips = []

    networks = Network.get_multi(network_ids)

    notifier = TaskNotifier(task)
    host = task.host
    version = task.version
    entrypoint = task.props['entrypoint']
    env = task.props['env']
    # use raw
    image = task.props['image']
    cpu_shares = 1024

    pub_agent_vlan_key = 'eru:agent:%s:vlan' % host.name
    feedback_key = 'eru:agent:%s:feedback' % task_id

    cids = []

    for _ in range(ncontainer):
        try:
            cid, cname = dockerjob.create_one_container(host, version,
                entrypoint, env, cores=None, cpu_shares=cpu_shares, image=image)
        except Exception as e:
            print e  # same as above: written for the celery log
            continue

        if spec_ips:
            ips = [n.acquire_specific_ip(ip) for n, ip in zip(networks, spec_ips)]
        else:
            ips = [n.acquire_ip() for n in networks]
        ips = [i for i in ips if i]
        ip_dict = {ip.vlan_address: ip for ip in ips}

        if ips:
            ident_id = cname.split('_')[-1]
            values = [str(task_id), cid, ident_id] + ['{0}:{1}'.format(ip.vlan_seq_id, ip.vlan_address) for ip in ips]
            rds.publish(pub_agent_vlan_key, '|'.join(values))

        for _ in ips:
            # timeout 15s
            rv = rds.blpop(feedback_key, 15)
            if rv is None:
                break
            # rv is like (feedback_key, 'succ|container_id|vethname|ip')
            succ, _, vethname, vlan_address = rv[1].split('|')
            if succ == '0':
                break
            ip = ip_dict.get(vlan_address, None)
            if ip:
                ip.set_vethname(vethname)

        else:
            current_flask.logger.info('Creating container (cid=%s, ips=%s)', cid, ips)
            c = Container.create(cid, host, version, cname, entrypoint, {}, env, nshare)
            for ip in ips:
                ip.assigned_to_container(c)
            notifier.notify_agent(cid)
            add_container_for_agent(c)
            add_container_backends(c)
            cids.append(cid)
            # skip the cleanup below
            continue

        # Clean up the failed container: remove it, release its cores and IPs
        current_flask.logger.info('Cleaning failed container (cid=%s)', cid)
        dockerjob.remove_container_by_cid([cid], host)
        [ip.release() for ip in ips]
        # on failure this key has to be cleaned up
        rds.delete(feedback_key)

    publish_to_service_discovery(version.name)
    task.finish_with_result(consts.TASK_SUCCESS, container_ids=cids)
    notifier.pub_success()
    current_flask.logger.info('Task<id=%s>: Done', task_id)
Example #9
File: task.py Project: binblee/eru-core
def create_containers_with_macvlan(task_id, ncontainer, nshare, cores, network_ids, spec_ips=None):
    """
    Run the task identified by task_id: deploy ncontainer containers,
    occupy the cores given by *_core_ids, and bind them to the subnets in networks.
    """
    current_flask.logger.info('Task<id=%s>: Started', task_id)
    task = Task.get(task_id)
    if not task:
        current_flask.logger.error('Task (id=%s) not found, quit', task_id)
        return

    if spec_ips is None:
        spec_ips = []

    need_network = bool(network_ids)
    networks = Network.get_multi(network_ids)

    notifier = TaskNotifier(task)
    host = task.host
    version = task.version
    entrypoint = task.props['entrypoint']
    env = task.props['env']
    ports = task.props['ports']
    args = task.props['args']
    # use raw
    route = task.props['route']
    image = task.props['image']
    callback_url = task.props['callback_url']
    cpu_shares = int(float(nshare) / host.pod.core_share * 1024) if nshare else 1024

    pub_agent_vlan_key = 'eru:agent:%s:vlan' % host.name
    pub_agent_route_key = 'eru:agent:%s:route' % host.name
    feedback_key = 'eru:agent:%s:feedback' % task_id

    cids = []

    for fcores, pcores in _iter_cores(cores, ncontainer):
        cores_for_one_container = {'full': fcores, 'part': pcores}
        try:
            cid, cname = dockerjob.create_one_container(host, version,
                entrypoint, env, fcores+pcores, ports=ports, args=args,
                cpu_shares=cpu_shares, image=image, need_network=need_network)
        except Exception as e:
            # written for the celery log
            print e
            host.release_cores(cores_for_one_container, nshare)
            continue

        if spec_ips:
            ips = [n.acquire_specific_ip(ip) for n, ip in zip(networks, spec_ips)]
        else:
            ips = [n.acquire_ip() for n in networks]
        ips = [i for i in ips if i]
        ip_dict = {ip.vlan_address: ip for ip in ips}

        if ips:
            if ERU_AGENT_API == 'pubsub':
                values = [str(task_id), cid] + ['{0}:{1}'.format(ip.vlan_seq_id, ip.vlan_address) for ip in ips]
                rds.publish(pub_agent_vlan_key, '|'.join(values))
            elif ERU_AGENT_API == 'http':
                agent = get_agent(host)
                ip_list = [(ip.vlan_seq_id, ip.vlan_address) for ip in ips]
                agent.add_container_vlan(cid, str(task_id), ip_list)

        for _ in ips:
            # timeout 15s
            rv = rds.blpop(feedback_key, 15)
            if rv is None:
                break
            # rv is like (feedback_key, 'succ|container_id|vethname|ip')
            succ, _, vethname, vlan_address = rv[1].split('|')
            if succ == '0':
                break
            ip = ip_dict.get(vlan_address, None)
            if ip:
                ip.set_vethname(vethname)

            if route:
                rds.publish(pub_agent_route_key, '%s|%s' % (cid, route))

        else:
            current_flask.logger.info('Creating container (cid=%s, ips=%s)', cid, ips)
            c = Container.create(cid, host, version, cname, entrypoint,
                    cores_for_one_container, env, nshare, callback_url)
            for ip in ips:
                ip.assigned_to_container(c)
            notifier.notify_agent(c)
            add_container_for_agent(c)
            add_container_backends(c)
            cids.append(cid)
            # skip the cleanup below
            continue

        # Clean up the failed container: remove it, release its cores and IPs
        current_flask.logger.info('Cleaning failed container (cid=%s)', cid)
        dockerjob.remove_container_by_cid([cid], host)
        host.release_cores(cores_for_one_container, nshare)
        [ip.release() for ip in ips]
        # on failure this key has to be cleaned up
        rds.delete(feedback_key)

    publish_to_service_discovery(version.name)
    task.finish_with_result(consts.TASK_SUCCESS, container_ids=cids)
    notifier.pub_success()

    # these do IO, so leave them for last
    falcon_all_graphs(version)
    falcon_all_alarms(version)

    current_flask.logger.info('Task<id=%s>: Done', task_id)
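
_iter_cores is not shown in these snippets, but Example #13 below inlines what looks like the same splitting with izip_longest and chunked. A sketch consistent with that inline version follows; treat it as an assumption rather than the project's actual helper.

from itertools import izip_longest       # itertools.zip_longest on Python 3
from more_itertools import chunked


def _iter_cores(cores, ncontainer):
    # Yield ncontainer (full_cores, part_cores) pairs; a missing half is
    # padded with [] so callers can always do fcores + pcores.
    full_cores = cores.get('full', [])
    part_cores = cores.get('part', [])
    return izip_longest(
        chunked(full_cores, len(full_cores) // ncontainer) if full_cores else [],
        chunked(part_cores, len(part_cores) // ncontainer) if part_cores else [],
        fillvalue=[])
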
Example #10
def test_container(test_db):
    a = App.get_or_create('app', 'http://git.hunantv.com/group/app.git')
    assert a is not None
    assert a.id == a.user_id

    v = a.add_version(random_sha1())
    assert v is not None
    assert v.app.id == a.id
    assert v.name == a.name
    assert len(v.containers.all()) == 0
    assert len(v.tasks.all()) == 0

    p = Pod.create('pod', 'pod', 10, -1)
    hosts = [
        Host.create(p, random_ipv4(), random_string(prefix='host'),
                    random_uuid(), 4, 4096) for i in range(6)
    ]

    for host in hosts[3:]:
        host.set_public()
    host_ids1 = {h.id for h in hosts[:3]}
    host_ids2 = {h.id for h in hosts[3:]}

    host_cores = centralized_schedule(p, 3, 3, 0)

    # Test the case with no fragmented (part) cores
    # Acquire cores
    containers = []
    for (host, count), cores in host_cores.iteritems():
        host.occupy_cores(cores, 0)
        cores_per_container = len(cores['full']) / count
        for i in range(count):
            cid = random_sha1()
            used_cores = {
                'full':
                cores['full'][i * cores_per_container:(i + 1) *
                              cores_per_container]
            }
            c = Container.create(cid,
                                 host,
                                 v,
                                 random_string(),
                                 'entrypoint',
                                 used_cores,
                                 'env',
                                 nshare=0)
            assert c is not None
            containers.append(c)

    for host in p.get_private_hosts():
        full_cores, part_cores = host.get_free_cores()
        assert len(full_cores) == 1
        assert len(part_cores) == 0
        assert len(host.containers.all()) == 1
        assert host.count == 1

    assert len(containers) == 3
    assert len(v.containers.all()) == 3

    for c in containers:
        assert c.host_id in host_ids1
        assert c.host_id not in host_ids2
        assert c.app.id == a.id
        assert c.version.id == v.id
        assert c.is_alive
        assert len(c.full_cores) == 3
        assert len(c.part_cores) == 0
        all_core_labels = sorted([
            '0',
            '1',
            '2',
            '3',
        ])
        used_full_core_labels = [core.label for core in c.full_cores]
        used_part_core_labels = [core.label for core in c.part_cores]
        free_core_labels = [core.label for core in c.host.get_free_cores()[0]]
        assert all_core_labels == sorted(used_full_core_labels +
                                         used_part_core_labels +
                                         free_core_labels)

    # Release cores
    for c in containers:
        c.delete()

    assert len(v.containers.all()) == 0
    assert get_max_container_count(p, 3, 0) == 3
    host_cores = centralized_schedule(p, 3, 3, 0)
    assert len(host_cores) == 3

    for host in p.get_private_hosts():
        full_cores, part_cores = host.get_free_cores()
        assert len(full_cores) == 4
        assert len(part_cores) == 0
        assert len(host.containers.all()) == 0
        assert host.count == 4

    # Test the case with fragmented (part) cores
    # Acquire cores
    host_cores = centralized_schedule(p, 3, 3, 4)
    containers = []
    for (host, count), cores in host_cores.iteritems():
        cores_per_container = len(cores['full']) / count
        host.occupy_cores(cores, 4)
        for i in range(count):
            cid = random_sha1()
            used_cores = {
                'full':
                cores['full'][i * cores_per_container:(i + 1) *
                              cores_per_container],
                'part':
                cores['part']
            }
            # not using a port
            c = Container.create(cid,
                                 host,
                                 v,
                                 random_string(),
                                 'entrypoint',
                                 used_cores,
                                 'env',
                                 nshare=4)
            assert c is not None
            containers.append(c)

    for host in p.get_private_hosts():
        full_cores, part_cores = host.get_free_cores()
        assert len(full_cores) == 0
        assert len(part_cores) == 1
        assert part_cores[0].remain == 6
        assert len(host.containers.all()) == 1
        assert host.count == D('0.6')

    assert len(containers) == 3
    assert len(v.containers.all()) == 3

    for c in containers:
        assert c.host_id in host_ids1
        assert c.host_id not in host_ids2
        assert c.app.id == a.id
        assert c.version.id == v.id
        assert c.is_alive
        assert len(c.full_cores) == 3
        assert len(c.part_cores) == 1
        all_core_labels = sorted([
            '0',
            '1',
            '2',
            '3',
        ])
        used_full_core_labels = [core.label for core in c.full_cores]
        used_part_core_labels = [core.label for core in c.part_cores]
        free_core_labels = [core.label for core in c.host.get_free_cores()[0]]
        assert all_core_labels == sorted(used_full_core_labels +
                                         used_part_core_labels +
                                         free_core_labels)

    # Release cores
    for c in containers:
        c.delete()

    assert len(v.containers.all()) == 0
    assert get_max_container_count(p, 3, 0) == 3
    host_cores = centralized_schedule(p, 3, 3, 0)
    assert len(host_cores) == 3

    for host in p.get_private_hosts():
        full_cores, part_cores = host.get_free_cores()
        assert len(full_cores) == 4
        assert len(host.containers.all()) == 0
        assert host.count == 4

    # Acquire cores
    host_cores = centralized_schedule(p, 6, 1, 5)
    containers = []
    for (host, count), cores in host_cores.iteritems():
        cores_per_container = len(cores['full']) / count
        for i in range(count):
            cid = random_sha1()
            used_cores = {
                'full':
                cores['full'][i * cores_per_container:(i + 1) *
                              cores_per_container],
                'part':
                cores['part'][i:i + 1],
            }
            host.occupy_cores(used_cores, 5)
            # not using a port
            c = Container.create(cid,
                                 host,
                                 v,
                                 random_string(),
                                 'entrypoint',
                                 used_cores,
                                 'env',
                                 nshare=5)
            assert c is not None
            containers.append(c)

    for host in p.get_private_hosts():
        full_cores, part_cores = host.get_free_cores()
        assert len(full_cores) == 1
        assert len(part_cores) == 0
        assert len(host.containers.all()) == 2
        assert host.count == D('1')

    assert len(containers) == 6
    assert len(v.containers.all()) == 6

    for c in containers:
        assert c.host_id in host_ids1
        assert c.host_id not in host_ids2
        assert c.app.id == a.id
        assert c.version.id == v.id
        assert c.is_alive
        assert len(c.full_cores) == 1
        assert len(c.part_cores) == 1

    # Release cores
    for c in containers:
        c.delete()

    assert len(v.containers.all()) == 0
    assert get_max_container_count(p, 3, 0) == 3
    host_cores = centralized_schedule(p, 3, 3, 0)
    assert len(host_cores) == 3

    for host in p.get_private_hosts():
        full_cores, part_cores = host.get_free_cores()
        assert len(full_cores) == 4
        assert len(part_cores) == 0
        assert len(host.containers.all()) == 0
        assert host.count == 4
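
The host.count assertions in this version are consistent with count tracking the remaining core capacity of a 4-core host as a Decimal (core_share of 10 taken from the Pod.create call above). Spelled out:

from decimal import Decimal as D

core_share = 10

# one container per host: 3 full cores plus 4 shares of a part core
assert D(4) - 3 - D(4) / core_share == D('0.6')

# two containers per host: 1 full core plus 5 shares of a part core each
assert D(4) - 2 * (1 + D(5) / core_share) == D('1')
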
Example #11
File: task.py Project: timfeirg/eru-core
def create_containers(task_id, ncontainer, nshare, cores, network_ids, spec_ips=None):
    """
    Run the task identified by task_id: deploy ncontainer containers,
    occupy the cores given by *_core_ids, and bind them to the subnets in networks.
    """
    _log.info('Task<id=%s>: Started', task_id)
    task = Task.get(task_id)
    if not task:
        _log.error('Task (id=%s) not found, quit', task_id)
        return

    if spec_ips is None:
        spec_ips = []

    need_network = bool(network_ids)
    networks = [ipam.get_pool(n) for n in network_ids]

    notifier = TaskNotifier(task)
    host = task.host
    version = task.version
    entrypoint = task.props['entrypoint']
    env = task.props['env']
    ports = task.props['ports']
    args = task.props['args']
    # use raw
    image = task.props['image']
    callback_url = task.props['callback_url']
    cpu_shares = int(float(nshare) / host.pod.core_share * 1024) if nshare else 1024

    cids = []
    backends = []
    entry = version.appconfig.entrypoints[entrypoint]

    for fcores, pcores in _iter_cores(cores, ncontainer):
        cores_for_one_container = {'full': fcores, 'part': pcores}
        # Create the container on the host
        try:
            cid, cname = dockerjob.create_one_container(host,
                                                        version,
                                                        entrypoint,
                                                        env,
                                                        fcores + pcores,
                                                        ports=ports, args=args,
                                                        cpu_shares=cpu_shares,
                                                        image=image,
                                                        need_network=need_network)
        except Exception as e:
            # written for the celery log
            _log.exception(e)
            host.release_cores(cores_for_one_container, nshare)
            continue

        # Record the container
        c = Container.create(cid, host, version, cname, entrypoint, cores_for_one_container, env, nshare, callback_url)

        # Set up the network stack for the container
        # and record all the related information.
        # If that fails, remove every record and the container on the host,
        # then continue with the next attempt.
        cidrs = [n.netspace for n in networks]
        if not ipam.allocate_ips(cidrs, cid, spec_ips):
            _clean_failed_containers(cid)
            continue

        notifier.notify_agent(c)
        add_container_for_agent(host, c)
        add_container_backends(c)
        cids.append(cid)
        backends.extend(c.get_backends())

        c.callback_report(status='start')

    health_check = entry.get('health_check', '')
    if health_check and backends:
        urls = [b + health_check for b in backends]
        if not wait_health_check(urls):
            # TODO: either roll back or raise an alert here
            _log.info('Task<id=%s>: Done, but something went wrong', task_id)
            return

    publish_to_service_discovery(version.name)
    task.finish(consts.TASK_SUCCESS)
    task.reason = 'ok'
    task.container_ids = cids
    notifier.pub_success()

    _log.info('Task<id=%s>: Done', task_id)
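
The cpu_shares expression above scales the fractional core share against Docker's default weight of 1024, falling back to the plain default when there is no part core. A worked check of the formula (a core_share of 10 is assumed, matching the Pod.create('pod', 'pod', 10, -1) calls in the tests):

def cpu_shares_for(nshare, core_share=10):
    return int(float(nshare) / core_share * 1024) if nshare else 1024


assert cpu_shares_for(5) == 512       # half a core's worth of weight
assert cpu_shares_for(10) == 1024     # a full extra core
assert cpu_shares_for(0) == 1024      # no part core: Docker default
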
Example #12
File: task.py Project: timfeirg/eru-core
def create_containers(task_id,
                      ncontainer,
                      nshare,
                      cores,
                      network_ids,
                      spec_ips=None):
    """
    Run the task identified by task_id: deploy ncontainer containers,
    occupy the cores given by *_core_ids, and bind them to the subnets in networks.
    """
    _log.info('Task<id=%s>: Started', task_id)
    task = Task.get(task_id)
    if not task:
        _log.error('Task (id=%s) not found, quit', task_id)
        return

    if spec_ips is None:
        spec_ips = []

    need_network = bool(network_ids)
    networks = [ipam.get_pool(n) for n in network_ids]

    notifier = TaskNotifier(task)
    host = task.host
    version = task.version
    entrypoint = task.props['entrypoint']
    env = task.props['env']
    ports = task.props['ports']
    args = task.props['args']
    # use raw
    image = task.props['image']
    callback_url = task.props['callback_url']
    cpu_shares = int(float(nshare) / host.pod.core_share *
                     1024) if nshare else 1024

    cids = []
    backends = []
    entry = version.appconfig.entrypoints[entrypoint]

    for fcores, pcores in _iter_cores(cores, ncontainer):
        cores_for_one_container = {'full': fcores, 'part': pcores}
        # Create the container on the host
        try:
            cid, cname = dockerjob.create_one_container(
                host,
                version,
                entrypoint,
                env,
                fcores + pcores,
                ports=ports,
                args=args,
                cpu_shares=cpu_shares,
                image=image,
                need_network=need_network)
        except Exception as e:
            # written for the celery log
            _log.exception(e)
            host.release_cores(cores_for_one_container, nshare)
            continue

        # Record the container
        c = Container.create(cid, host, version, cname, entrypoint,
                             cores_for_one_container, env, nshare,
                             callback_url)

        # Set up the network stack for the container
        # and record all the related information.
        # If that fails, remove every record and the container on the host,
        # then continue with the next attempt.
        cidrs = [n.netspace for n in networks]
        if not ipam.allocate_ips(cidrs, cid, spec_ips):
            _clean_failed_containers(cid)
            continue

        notifier.notify_agent(c)
        add_container_for_agent(host, c)
        add_container_backends(c)
        cids.append(cid)
        backends.extend(c.get_backends())

        c.callback_report(status='start')

    health_check = entry.get('health_check', '')
    if health_check and backends:
        urls = [b + health_check for b in backends]
        if not wait_health_check(urls):
            # TODO: either roll back or raise an alert here
            _log.info('Task<id=%s>: Done, but something went wrong', task_id)
            return

    publish_to_service_discovery(version.name)
    task.finish(consts.TASK_SUCCESS)
    task.reason = 'ok'
    task.container_ids = cids
    notifier.pub_success()

    _log.info('Task<id=%s>: Done', task_id)
Example #13
def create_containers_with_macvlan(task_id, ncontainer, nshare, cores, network_ids, spec_ips=None):
    """
    Run the task identified by task_id: deploy ncontainer containers,
    occupy the cores given by *_core_ids, and bind them to the subnets in networks.
    """
    # TODO support part core
    current_flask.logger.info("Task<id=%s>: Started", task_id)
    task = Task.get(task_id)
    if not task:
        current_flask.logger.error("Task (id=%s) not found, quit", task_id)
        return

    if spec_ips is None:
        spec_ips = []

    networks = Network.get_multi(network_ids)

    notifier = TaskNotifier(task)
    host = task.host
    version = task.version
    entrypoint = task.props["entrypoint"]
    env = task.props["env"]
    # use raw
    image = task.props["image"]
    cpu_shares = int(float(nshare) / host.pod.core_share * 1024) if nshare else 1024

    pub_agent_vlan_key = "eru:agent:%s:vlan" % host.name
    feedback_key = "eru:agent:%s:feedback" % task_id

    cids = []

    full_cores, part_cores = cores.get("full", []), cores.get("part", [])
    for fcores, pcores in izip_longest(
        chunked(full_cores, len(full_cores) / ncontainer),
        chunked(part_cores, len(part_cores) / ncontainer),
        fillvalue=[],
    ):
        cores_for_one_container = {"full": fcores, "part": pcores}
        try:
            cid, cname = dockerjob.create_one_container(
                host, version, entrypoint, env, fcores + pcores, cpu_shares, image=image
            )
        except Exception:
            host.release_cores(cores_for_one_container, nshare)
            continue

        if spec_ips:
            ips = [n.acquire_specific_ip(ip) for n, ip in zip(networks, spec_ips)]
        else:
            ips = [n.acquire_ip() for n in networks]
        ips = [i for i in ips if i]
        ip_dict = {ip.vlan_address: ip for ip in ips}

        if ips:
            ident_id = cname.split("_")[-1]
            values = [str(task_id), cid, ident_id] + ["{0}:{1}".format(ip.vlan_seq_id, ip.vlan_address) for ip in ips]
            rds.publish(pub_agent_vlan_key, "|".join(values))

        for _ in ips:
            # timeout 15s
            rv = rds.blpop(feedback_key, 15)
            if rv is None:
                break
            # rv is like (feedback_key, 'succ|container_id|vethname|ip')
            succ, _, vethname, vlan_address = rv[1].split("|")
            if succ == "0":
                break
            ip = ip_dict.get(vlan_address, None)
            if ip:
                ip.set_vethname(vethname)

        else:
            current_flask.logger.info("Creating container (cid=%s, ips=%s)", cid, ips)
            c = Container.create(cid, host, version, cname, entrypoint, cores_for_one_container, env, nshare)
            for ip in ips:
                ip.assigned_to_container(c)
            notifier.notify_agent(cid)
            add_container_for_agent(c)
            add_container_backends(c)
            cids.append(cid)
            # skip the cleanup below
            continue

        # Clean up the failed container: remove it, release its cores and IPs
        current_flask.logger.info("Cleaning failed container (cid=%s)", cid)
        dockerjob.remove_container_by_cid([cid], host)
        host.release_cores(cores_for_one_container, nshare)
        [ip.release() for ip in ips]
        # on failure this key has to be cleaned up
        rds.delete(feedback_key)

    publish_to_service_discovery(version.name)
    task.finish_with_result(consts.TASK_SUCCESS, container_ids=cids)
    notifier.pub_success()
    current_flask.logger.info("Task<id=%s>: Done", task_id)
Example #14
def test_container(test_db):
    a = App.get_or_create('app', 'http://git.hunantv.com/group/app.git', '')
    assert a is not None

    v = a.add_version(random_sha1())
    assert v is not None
    assert v.app.id == a.id
    assert v.name == a.name
    assert len(v.containers.all()) == 0
    assert len(v.tasks.all()) == 0

    g = Group.create('group', 'group')
    p = Pod.create('pod', 'pod')
    assert p.assigned_to_group(g)
    hosts = [Host.create(p, random_ipv4(), random_string(prefix='host'),
        random_uuid(), 4, 4096) for i in range(6)]

    for host in hosts[:3]:
        host.assigned_to_group(g)
    host_ids1 = {h.id for h in hosts[:3]}
    host_ids2 = {h.id for h in hosts[3:]}

    assert g.get_max_containers(p, 3) == 3
    host_cores = g.get_free_cores(p, 3, 3)
    assert len(host_cores) == 3

    containers = []
    for (host, count), cores in host_cores.iteritems():
        cores_per_container = len(cores) / count
        for i in range(count):
            cid = random_sha1()
            used_cores = cores[i*cores_per_container:(i+1)*cores_per_container]
            # not using a port
            c = Container.create(cid, host, v, random_string(), 'entrypoint', used_cores, 'env')
            assert c is not None
            containers.append(c)
        host.occupy_cores(cores)

    for host in g.private_hosts.all():
        assert len(host.get_free_cores()) == 1
        assert len(host.containers.all()) == 1
        assert host.count == 1

    assert len(containers) == 3
    assert len(v.containers.all()) == 3

    for c in containers:
        assert c.host_id in host_ids1
        assert c.host_id not in host_ids2
        assert c.app.id == a.id
        assert c.version.id == v.id
        assert c.is_alive
        assert len(c.cores.all()) == 3
        all_core_labels = sorted(['0', '1', '2', '3', ])
        used_core_labels = [core.label for core in c.cores.all()]
        free_core_labels = [core.label for core in c.host.get_free_cores()]
        assert all_core_labels == sorted(used_core_labels + free_core_labels)

    for c in containers:
        c.delete()

    assert len(v.containers.all()) == 0
    assert g.get_max_containers(p, 3) == 3
    host_cores = g.get_free_cores(p, 3, 3)
    assert len(host_cores) == 3

    for host in g.private_hosts.all():
        assert len(host.get_free_cores()) == 4
        assert len(host.containers.all()) == 0
        assert host.count == 0