Example #1
def build_and_run_docker(args, job):
    create_infrabox_directories(args, job)

    if args.tag:
        image_name = args.tag
    else:
        image_name = args.project_name + '_' + job['name']
        image_name = image_name.replace("/", "-")
        image_name = image_name.lower()

    deployments = job.get('deployments', [])
    if deployments:
        for d in deployments:
            target = d.get('target', None)

            if not target and not job.get('build_only', True):
                continue

            build_docker_image(args, job, image_name, target=target)

            new_image_name = "%s/%s:%s" % (d['host'], d['repository'],
                                           d.get('tag', 'build_local'))
            execute(['docker', 'tag', image_name, new_image_name])

    build_docker_image(args, job, image_name)
    if not job.get('build_only', True):
        run_container(args, job, image_name)

        for d in deployments:
            new_image_name = "%s/%s:%s" % (d['host'], d['repository'],
                                           d.get('tag', 'build_local'))
            execute(['docker', 'tag', image_name, new_image_name])
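Every example on this page calls a shared execute() helper that is not shown here. A minimal sketch of what such a wrapper could look like, assuming it only needs the keyword arguments used in these snippets (cwd, env, ignore_error, ignore_output); the project's real helper may differ:

import subprocess

def execute(cmd, cwd=None, env=None, ignore_error=False, ignore_output=False):
    # Hypothetical stand-in for the execute() helper used above: run a command,
    # optionally discarding its output and/or tolerating a non-zero exit code.
    output = subprocess.DEVNULL if ignore_output else None
    result = subprocess.run(cmd, cwd=cwd, env=env, stdout=output, stderr=output)
    if result.returncode != 0 and not ignore_error:
        raise Exception("Command failed: %s" % " ".join(cmd))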
Example #2
def signal_handler(_, __):
    logger.info("Stopping docker containers")
    execute(['docker-compose', '-f', compose_file_new, 'stop'],
            env=env,
            cwd=job['build_context'])
    os.remove(compose_file_new)
    sys.exit(0)
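On its own the handler does nothing; the docker-compose examples further down register it around the long-running docker-compose up call and restore the default handler afterwards:

signal.signal(signal.SIGINT, signal_handler)   # stop the containers on Ctrl+C
# ... docker-compose up --abort-on-container-exit ...
signal.signal(signal.SIGINT, signal.SIG_DFL)   # restore the default behaviour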
Example #3
def build_docker_image(args, job, image_name, target=None):
    # Build the image
    logger.info("Build docker image")

    docker_file = os.path.normpath(
        os.path.join(get_build_context(job, args), job['docker_file']))

    cmd = ['docker', 'build', '-t', image_name, '.', '-f', docker_file]
    if 'build_arguments' in job:
        for name, value in job['build_arguments'].items():
            cmd += ['--build-arg', '%s=%s' % (name, value)]

    if args.build_arg:
        for a in args.build_arg:
            cmd += ['--build-arg', a]

    if not args.build_arg or not any([
            build_arg.startswith("INFRABOX_BUILD_NUMBER=")
            for build_arg in args.build_arg
    ]):
        cmd += ['--build-arg', 'INFRABOX_BUILD_NUMBER=local']

    # memory limit
    if not args.unlimited:
        cmd += ['-m', '%sm' % job['resources']['limits']['memory']]

    if target:
        cmd += ['--target', target]

    execute(cmd, cwd=get_build_context(job, args))
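For illustration only, with a made-up job definition, the command assembled above would come out roughly as follows (the resources limit is what feeds the -m flag when --unlimited is not set):

# Hypothetical job, not taken from a real project:
job = {
    'name': 'test',
    'docker_file': 'Dockerfile',
    'build_arguments': {'VERSION': '1.0'},
    'resources': {'limits': {'memory': 1024, 'cpu': 1}},
}
# Resulting cmd just before execute() is called, roughly:
# ['docker', 'build', '-t', 'myproject_test', '.', '-f', '<build context>/Dockerfile',
#  '--build-arg', 'VERSION=1.0',
#  '--build-arg', 'INFRABOX_BUILD_NUMBER=local',
#  '-m', '1024m']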
Example #4
def tag_docker_image(image_name, deployments):
    new_images = []
    for d in deployments:
        new_image_name = "%s/%s:%s" % (d['host'], d['repository'], d.get('tag', 'build_local'))
        execute(['docker', 'tag', image_name, new_image_name])
        new_images.append(new_image_name)
    return new_images
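A short usage sketch with made-up deployment entries; the keys match the ones the function reads (host, repository and the optional tag):

# Hypothetical deployments, for illustration only:
deployments = [
    {'host': 'registry.example.com', 'repository': 'myproject/test'},
    {'host': 'registry.example.com', 'repository': 'myproject/test', 'tag': 'v1'},
]
new_images = tag_docker_image('myproject_test', deployments)
# new_images == ['registry.example.com/myproject/test:build_local',
#                'registry.example.com/myproject/test:v1']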
Example #5
def recreate_directories(dirs):
    for d in dirs:
        if os.path.exists(d):
            try:
                shutil.rmtree(d)
            except Exception:
                # Local delete failed (e.g. permission errors on files written by
                # containers): remove the contents from inside a container, then retry.
                execute(['docker', 'run', '-v', '%s:/to_delete' % d, 'alpine', 'rm', '-rf', '/to_delete'],
                        ignore_error=True,
                        ignore_output=True)
                shutil.rmtree(d)

        makedirs(d)
Example #6
def run_docker_image(args, job):
    create_infrabox_directories(args, job)
    image_name = job['image'].replace('$INFRABOX_BUILD_NUMBER', 'local')

    if job.get('run', True):
        run_container(args, job, image_name)

    deployments = job.get('deployments', [])
    for d in deployments:
        new_image_name = "%s/%s:%s" % (d['host'], d['repository'], d.get('tag', 'build_local'))
        logger.info("Tagging image: %s" % new_image_name)
        execute(['docker', 'tag', image_name, new_image_name])
Example #7
File: run.py Project: metti/cli
def build_and_run_docker(args, job):
    infrabox = create_infrabox_directories(args, job)

    if args.tag:
        image_name = args.tag
    else:
        image_name = args.project_name + '_' + job['name']
        image_name = image_name.replace("/", "-")
        image_name = image_name.lower()

    container_name = job['name'].replace("/", "-")

    # Build the image
    logger.info("Build docker image")
    execute(['docker', 'rm', container_name],
            cwd=args.project_root,
            ignore_error=True,
            ignore_output=True)

    cmd = ['docker', 'build', '-t', image_name, '.', '-f', job['docker_file']]
    if 'build_arguments' in job:
        for name, value in job['build_arguments'].items():
            cmd += ['--build-arg', '%s=%s' % (name, value)]

    execute(cmd, cwd=job['base_path'])

    # Run it
    if 'build_only' in job and not job['build_only']:
        cmd = [
            'docker', 'run', '--name', container_name, '-v',
            '%s:/infrabox' % infrabox
        ]

        for e in args.environment:
            cmd += ['-e', e]

        if 'environment' in job:
            for name, value in job['environment'].items():
                if isinstance(value, dict):
                    continue

                cmd += ['-e', '%s=%s' % (name, value)]

        cmd.append(image_name)

        logger.info("Run docker container")
        execute(cmd, cwd=args.project_root)

        if job.get('commit_after_run', False):
            logger.info("Commiting Container")
            execute(['docker', 'commit', container_name, image_name],
                    cwd=args.project_root)
Example #8
def build_and_run_docker(args, job):
    infrabox = create_infrabox_directories(args, job)

    image_name = args.project_name + '_' + job['name']
    image_name = image_name.replace("/", "-")
    image_name = image_name.lower()

    container_name = job['name'].replace("/", "-")

    # Build the image
    logger.info("Build docker image")
    execute(['docker', 'rm', container_name],
            cwd=args.project_root,
            ignore_error=True,
            ignore_output=True)
    execute(
        ['docker', 'build', '-t', image_name, '.', '-f', job['docker_file']],
        cwd=job['base_path'])

    check_username(image_name)

    # Run it
    cmd = [
        'docker', 'run', '--name', container_name, '-v',
        '%s:/infrabox' % infrabox
    ]

    for e in args.environment:
        cmd += ['-e', e]

    cmd.append(image_name)

    logger.info("Run docker container")
    execute(cmd, cwd=args.project_root)

    if job.get('commit_after_run', False):
        logger.info("Commiting Container")
        execute(['docker', 'commit', container_name, image_name],
                cwd=args.project_root)
Example #9
def build_and_run_docker_compose(args, job):
    create_infrabox_directories(args, job)

    compose_file = os.path.join(job['infrabox_context'],
                                job['docker_compose_file'])
    compose_file = os.path.normpath(compose_file)
    compose_file_new = compose_file + ".infrabox"

    # rewrite compose file
    compose_file_content = docker_compose.create_from(compose_file)
    for service in compose_file_content['services']:
        create_infrabox_directories(args,
                                    job,
                                    service=service,
                                    services=compose_file_content['services'],
                                    compose_file=compose_file)

        volumes = []
        for v in compose_file_content['services'][service].get('volumes', []):
            if isinstance(v, str):
                v = v.replace('/infrabox/context', args.project_root)
            volumes.append(v)

        for name, path in job['directories'].items():
            volumes.append(str('%s:/infrabox/%s' % (path, name)))

        # Mount /infrabox/context to the build context of the service if build.context
        # is set in the compose file for the service
        service_build = compose_file_content['services'][service].get(
            'build', None)
        if service_build:
            service_build_context = service_build.get('context', None)
            if service_build_context:
                build_context = os.path.join(os.path.dirname(compose_file),
                                             service_build_context)
                volumes += ['%s:/infrabox/context' % str(build_context)]
            else:
                volumes += ['%s:/infrabox/context' % args.project_root]
        else:
            volumes += ['%s:/infrabox/context' % args.project_root]
        volumes = list(dict.fromkeys(volumes))

        compose_file_content['services'][service]['volumes'] = volumes

        build = compose_file_content['services'][service].get('build', None)
        if build:
            if not build.get('args', None):
                build['args'] = []

            if not any([
                    build_arg.startswith("INFRABOX_BUILD_NUMBER=")
                    for build_arg in build['args']
            ]):
                build['args'] += ['INFRABOX_BUILD_NUMBER=local']

    with open(compose_file_new, "w+") as out:
        yaml.dump(compose_file_content, out, default_flow_style=False)

    env = {
        'PATH': os.environ['PATH'],
        'INFRABOX_CLI': 'true',
        'INFRABOX_BUILD_NUMBER': 'local'
    }

    if 'environment' in job:
        for name, value in job['environment'].items():
            if isinstance(value, dict):
                env[name] = get_secret(args, value['$secret'])
            else:
                env[name] = value

    if not args.no_rm:
        execute([
            'docker-compose', '-p', args.project_name, '-f', compose_file_new,
            'rm', '-f'
        ],
                env=env,
                cwd=job['build_context'])

    execute([
        'docker-compose', '-p', args.project_name, '-f', compose_file_new,
        'build'
    ],
            env=env,
            cwd=job['build_context'])

    def signal_handler(_, __):
        logger.info("Stopping docker containers")
        execute(['docker-compose', '-f', compose_file_new, 'stop'],
                env=env,
                cwd=job['build_context'])
        os.remove(compose_file_new)
        sys.exit(0)

    signal.signal(signal.SIGINT, signal_handler)

    execute([
        'docker-compose', '-p', args.project_name, '-f', compose_file_new,
        'up', '--abort-on-container-exit'
    ],
            env=env)
    signal.signal(signal.SIGINT, signal.SIG_DFL)

    # Print the return code of all the containers
    execute([
        'docker-compose', '-p', args.project_name, '-f', compose_file_new, 'ps'
    ],
            env=env,
            cwd=job['build_context'])

    os.remove(compose_file_new)
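To make the volume rewriting above easier to follow, here is what it might do to a single made-up service (paths and directory names are illustrative only):

# Hypothetical service before rewriting (no build section):
#   web:
#     image: alpine
#     volumes: ['/infrabox/context/data:/data']
#
# Assuming args.project_root is /home/user/project and the job exposes a single
# 'output' directory at /tmp/ib/output, the rewritten volumes would roughly be:
#   ['/home/user/project/data:/data',
#    '/tmp/ib/output:/infrabox/output',
#    '/home/user/project:/infrabox/context']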
Example #10
def build_and_run_docker(args, job):
    create_infrabox_directories(args, job)

    if args.tag:
        image_name = args.tag
    else:
        image_name = args.project_name + '_' + job['name']
        image_name = image_name.replace("/", "-")
        image_name = image_name.lower()

    container_name = 'ib_' + job['name'].replace("/", "-")

    # Build the image
    logger.info("Build docker image")

    if not args.no_rm:
        execute(['docker', 'rm', container_name],
                cwd=args.project_root,
                ignore_error=True,
                ignore_output=True)

    docker_file = os.path.normpath(
        os.path.join(get_build_context(job, args), job['docker_file']))

    cmd = ['docker', 'build', '-t', image_name, '.', '-f', docker_file]
    if 'build_arguments' in job:
        for name, value in job['build_arguments'].items():
            cmd += ['--build-arg', '%s=%s' % (name, value)]

    # memory limit
    cmd += ['-m', '%sm' % job['resources']['limits']['memory']]

    execute(cmd, cwd=get_build_context(job, args))

    # Tag images if deployments are configured
    deployments = job.get('deployments', [])
    for d in deployments:
        new_image_name = "%s/%s:%s" % (d['host'], d['repository'],
                                       d.get('tag', 'local'))
        logger.info("Tagging image: %s" % new_image_name)
        execute(['docker', 'tag', image_name, new_image_name])

    if job.get('build_only', True):
        return

    # Run the container
    cmd = ['docker', 'run', '--name', container_name]

    # Security context
    security_context = job.get('security_context', {})
    caps = security_context.get('capabilities', {}).get('add', [])

    if caps:
        cmd += ['--cap-add=' + ','.join(caps)]

    privileged = security_context.get('privileged', False)
    if privileged:
        cmd += ['--privileged', '-v', '/tmp/docker:/var/lib/docker']

    for name, path in job['directories'].items():
        cmd += ['-v', '%s:/infrabox/%s' % (path, name)]

    if 'environment' in job:
        for name, value in job['environment'].items():
            if isinstance(value, dict):
                cmd += [
                    '-e',
                    '%s=%s' % (name, get_secret(args, value['$secret']))
                ]
            else:
                cmd += ['-e', '%s=%s' % (name, value)]

    cmd += ['-e', 'INFRABOX_CLI=true']

    if os.name != 'nt':
        cmd += ['-e', 'INFRABOX_UID=%s' % os.geteuid()]
        cmd += ['-e', 'INFRABOX_GID=%s' % os.getegid()]

    # memory limit
    cmd += ['-m', '%sm' % job['resources']['limits']['memory']]

    # CPU limit
    cmd += ['--cpus', str(job['resources']['limits']['cpu'])]

    logger.info("Run docker container")
    cmd.append(image_name)
    execute(cmd, cwd=args.project_root)

    logger.info("Commiting Container")
    execute(['docker', 'commit', container_name, image_name],
            cwd=args.project_root)
Example #11
def build_and_run_docker_compose(args, job):
    create_infrabox_directories(args, job)

    compose_file = os.path.join(job['infrabox_context'],
                                job['docker_compose_file'])
    compose_file = os.path.normpath(compose_file)
    compose_file_new = compose_file + ".infrabox"

    # rewrite compose file
    compose_file_content = docker_compose.create_from(compose_file)
    for service in compose_file_content['services']:
        create_infrabox_directories(args,
                                    job,
                                    service=service,
                                    services=compose_file_content['services'],
                                    compose_file=compose_file)

        volumes = []
        for v in compose_file_content['services'][service].get('volumes', []):
            v = v.replace('/infrabox/context', args.project_root)
            volumes.append(v)

        for name, path in job['directories'].items():
            volumes.append(str('%s:/infrabox/%s' % (path, name)))

        compose_file_content['services'][service]['volumes'] = volumes

    with open(compose_file_new, "w+") as out:
        yaml.dump(compose_file_content, out, default_flow_style=False)

    env = {'PATH': os.environ['PATH'], 'INFRABOX_CLI': 'true'}

    if 'environment' in job:
        for name, value in job['environment'].items():
            if isinstance(value, dict):
                env[name] = get_secret(args, value['$secret'])
            else:
                env[name] = value

    if not args.no_rm:
        execute([
            'docker-compose', '-p', args.project_name, '-f', compose_file_new,
            'rm', '-f'
        ],
                env=env,
                cwd=job['build_context'])

    execute([
        'docker-compose', '-p', args.project_name, '-f', compose_file_new,
        'build'
    ],
            env=env,
            cwd=job['build_context'])

    def signal_handler(_, __):
        logger.info("Stopping docker containers")
        execute(['docker-compose', '-f', compose_file_new, 'stop'],
                env=env,
                cwd=job['build_context'])
        os.remove(compose_file_new)
        sys.exit(0)

    signal.signal(signal.SIGINT, signal_handler)

    execute([
        'docker-compose', '-p', args.project_name, '-f', compose_file_new,
        'up', '--abort-on-container-exit'
    ],
            env=env)
    signal.signal(signal.SIGINT, signal.SIG_DFL)

    os.remove(compose_file_new)
Example #12
def get_job_list(data, args, parents=None, infrabox_context=None):
    jobs = []

    if not parents:
        parents = []

    parent_name = get_parent_name(parents)

    for job in data['jobs']:
        job['id'] = str(uuid.uuid4())
        job['parents'] = parents
        job['infrabox_context'] = os.path.normpath(infrabox_context)

        if 'build_context' in job:
            job['build_context'] = os.path.normpath(os.path.join(infrabox_context, job['build_context']))
        else:
            job['build_context'] = os.path.normpath(infrabox_context)

        if parent_name != '':
            job['name'] = parent_name + "/" + job['name']

            deps = job.get('depends_on', [])
            for x in range(0, len(deps)):
                dep = deps[x]
                if isinstance(dep, dict):
                    dep = dep['job']

                deps[x] = parent_name + "/" + dep

        rewrite_job_dependencies(job)

        job_name = job['name']

        if job['type'] != "workflow" and job['type'] != 'git':
            jobs.append(job)
            continue

        new_parents = parents[:]
        new_parents.append(job_name)

        if job['type'] == "git":
            repo_path = os.path.join('/tmp', job_name)
            clone_branch = job.get('branch', None)
            execute(['rm', '-rf', repo_path])
            if clone_branch:
                execute(['git', 'clone', '--depth=50', '--branch', clone_branch, job['clone_url'], repo_path])
            else:
                execute(['git', 'clone', '--depth=50', job['clone_url'], repo_path])
            execute(['git', 'config', 'remote.origin.url', job['clone_url']], cwd=repo_path)
            execute(['git', 'config', 'remote.origin.fetch', '+refs/heads/*:refs/remotes/origin/*'], cwd=repo_path)
            execute(['git', 'fetch', 'origin', job['commit']], cwd=repo_path)

            execute(['git', 'checkout', job['commit']], cwd=repo_path)

            ib_path = os.path.join(repo_path, job.get('infrabox_file', 'infrabox.json'))
            if not os.path.exists(ib_path):
                ib_path = os.path.join(repo_path, job.get('infrabox_file', 'infrabox.yaml'))

            data = load_infrabox_file(ib_path)
            sub = get_job_list(data, args, new_parents,
                               infrabox_context=os.path.dirname(ib_path))

            # Set the build context to the dirname of the infrabox file
            # if no build context is specified
            for s in sub:
                if 'build_context' not in s:
                    s['build_context'] = os.path.normpath(os.path.dirname(ib_path))

        else:
            p = os.path.join(infrabox_context, job['infrabox_file'])
            p = os.path.normpath(p)
            data = load_infrabox_file(p)
            sub = get_job_list(data, args, new_parents,
                               infrabox_context=os.path.dirname(p))

        # every sub job which does not have a parent
        # should be a child of the current job
        job_with_children = {}
        for s in sub:
            deps = s.get('depends_on', [])
            if not deps:
                s['depends_on'] = job.get('depends_on', [])

            for d in deps:
                job_with_children[d['job']] = True

        jobs += sub

        # add a wait job to all sub jobs
        # which don't have a child, so we have
        # one 'final' job
        final_job = {
            "type": "wait",
            "name": job_name,
            "depends_on": [],
            "id": str(uuid.uuid4()),
            "parents": new_parents
        }

        for s in sub:
            sub_name = s['name']
            if sub_name not in job_with_children:
                final_job['depends_on'].append({"job": sub_name, "on": ["finished"]})

        jobs.append(final_job)

    return jobs
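The flattening of nested jobs is easiest to see on a tiny made-up example; names and files below are illustrative only:

# Hypothetical top-level infrabox file with one workflow job 'deploy' whose
# infrabox_file defines two sub jobs, 'build' and 'test' (test depends on build).
# get_job_list() would roughly return:
#
#   deploy/build   - inherits the workflow job's own depends_on (empty here)
#   deploy/test    - depends on deploy/build
#   deploy         - generated 'wait' job, depends on deploy/test (the only leaf)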
Example #13
def build_and_run_docker(args, job):
    create_infrabox_directories(args, job)

    if args.tag:
        image_name = args.tag
    else:
        image_name = args.project_name + '_' + job['name']
        image_name = image_name.replace("/", "-")
        image_name = image_name.lower()

    container_name = 'ib_' + job['name'].replace("/", "-")

    # Build the image
    logger.info("Build docker image")

    if not args.no_rm:
        execute(['docker', 'rm', container_name],
                cwd=args.project_root,
                ignore_error=True,
                ignore_output=True)

    cmd = ['docker', 'build', '-t', image_name, '.', '-f', job['docker_file']]
    if 'build_arguments' in job:
        for name, value in job['build_arguments'].items():
            cmd += ['--build-arg', '%s=%s' % (name, value)]

    execute(cmd, cwd=job['base_path'])

    if job.get('build_only', True):
        return

    cmd = ['docker', 'run', '--name', container_name]
    caps = job.get('security_context', {}).get('capabilities',
                                               {}).get('add', [])
    if caps:
        cmd += ['--cap-add=' + ','.join(caps)]

    for name, path in job['directories'].items():
        cmd += ['-v', '%s:/infrabox/%s' % (path, name)]

    cmd += ['-v', '/var/run/docker.sock:/var/run/docker.sock']
    cmd += ['-m', '%sm' % job['resources']['limits']['memory']]

    if 'environment' in job:
        for name, value in job['environment'].items():
            if isinstance(value, dict):
                cmd += [
                    '-e',
                    '%s=%s' % (name, get_secret(args, value['$secret']))
                ]
            else:
                cmd += ['-e', '%s=%s' % (name, value)]

    cmd += ['-e', 'INFRABOX_CLI=true']

    if os.name != 'nt':
        cmd += ['-e', 'INFRABOX_UID=%s' % os.geteuid()]
        cmd += ['-e', 'INFRABOX_GID=%s' % os.getegid()]

    cmd.append(image_name)

    logger.info("Run docker container")
    execute(cmd, cwd=args.project_root)

    logger.info("Commiting Container")
    execute(['docker', 'commit', container_name, image_name],
            cwd=args.project_root)
Example #14
def get_job_list(data, args, parents=None, base_path=None):
    jobs = []

    if parents is None:
        parents = []

    parent_name = get_parent_name(parents)

    for job in data['jobs']:
        job['id'] = str(uuid.uuid4())
        job['avg_duration'] = 0
        job['parents'] = parents
        job['base_path'] = base_path

        if parent_name != '':
            job['name'] = parent_name + "/" + job['name']

            deps = job.get('depends_on', [])
            for x in range(0, len(deps)):
                deps[x] = parent_name + "/" + deps[x]

        job_name = job['name']

        if job['type'] != "workflow" and job['type'] != 'git':
            jobs.append(job)
            continue

        new_parents = parents[:]
        new_parents.append(job_name)

        if job['type'] == "git":
            repo_path = os.path.join('/tmp', job_name)
            execute(['rm', '-rf', repo_path])
            execute(
                ['git', 'clone', '--depth=50', job['clone_url'], repo_path])
            execute(['git', 'checkout', job['commit']], cwd=repo_path)

            data = load_infrabox_json(os.path.join(repo_path, 'infrabox.json'))
            sub = get_job_list(data, args, new_parents, base_path=repo_path)
        else:
            p = os.path.join(base_path, job['infrabox_file'])
            bp = os.path.dirname(p)
            data = load_infrabox_json(p)
            sub = get_job_list(data, args, new_parents, base_path=bp)

        # every sub job which does not have a parent
        # should be a child of the current job
        job_with_children = {}
        for s in sub:
            deps = s.get('depends_on', [])
            if len(deps) == 0:
                s['depends_on'] = job.get('depends_on', [])

            for d in deps:
                job_with_children[d] = True

        jobs += sub

        # add a wait job to all sub jobs
        # which don't have a child, so we have
        # one 'final' job
        final_job = {
            "type": "wait",
            "name": job_name,
            "depends_on": [],
            "id": str(uuid.uuid4()),
            "parents": new_parents
        }

        for s in sub:
            sub_name = s['name']
            if sub_name not in job_with_children:
                final_job['depends_on'].append(sub_name)

        jobs.append(final_job)

    return jobs
Example #15
def build_and_run_docker_compose(args, job):
    compose_file = os.path.join(job['base_path'], job['docker_compose_file'])
    compose_file_new = compose_file + ".infrabox"

    # rewrite compose file
    compose_file_content = docker_compose.create_from(compose_file)
    for service in compose_file_content['services']:
        infrabox = create_infrabox_directories(args, job, service=service)
        compose_file_content['services'][service]['volumes'] = [
            "%s:/infrabox" % str(infrabox)
        ]

    with open(compose_file_new, "w+") as out:
        yaml.dump(compose_file_content, out, default_flow_style=False)

    env = {"PATH": os.environ['PATH']}

    for e in args.environment:
        s = e.split("=")
        env[s[0]] = s[1]

    if args.clean:
        execute([
            'docker-compose', '-p', args.project_name, '-f', compose_file_new,
            'rm', '-f'
        ],
                env=env,
                cwd=job['base_path'])

    execute([
        'docker-compose', '-p', args.project_name, '-f', compose_file_new,
        'build'
    ],
            env=env,
            cwd=job['base_path'])

    pname = args.project_name.replace("-", "")
    image_names = subprocess.check_output(
        "docker images | grep %s | awk '{print $1;}'" % pname, shell=True)

    for l in image_names.splitlines():
        check_username(l)

    def signal_handler(_, __):
        logger.info("Stopping docker containers")
        execute(['docker-compose', '-f', compose_file_new, 'stop'],
                env=env,
                cwd=job['base_path'])
        os.remove(compose_file_new)
        sys.exit(0)

    signal.signal(signal.SIGINT, signal_handler)

    execute([
        'docker-compose', '-p', args.project_name, '-f', compose_file_new,
        'up', '--abort-on-container-exit'
    ],
            env=env)
    signal.signal(signal.SIGINT, signal.SIG_DFL)

    os.remove(compose_file_new)
Example #16
def run_container(args, job, image_name):
    container_name = 'ib_' + job['name'].replace("/", "-")

    if not args.no_rm:
        execute(['docker', 'rm', container_name],
                cwd=args.project_root,
                ignore_error=True,
                ignore_output=True)

    # Run the container
    cmd = ['docker', 'run', '--name', container_name]

    # Security context
    security_context = job.get('security_context', {})

    privileged = security_context.get('privileged', False)
    if privileged:
        cmd += ['--privileged', '-v', '/tmp/docker:/var/lib/docker']

    for name, path in job['directories'].items():
        cmd += ['-v', '%s:/infrabox/%s' % (path, name)]

    if 'environment' in job:
        for name, value in job['environment'].items():
            if isinstance(value, dict):
                cmd += [
                    '-e',
                    '%s=%s' % (name, get_secret(args, value['$secret']))
                ]
            else:
                cmd += ['-e', '%s=%s' % (name, value)]

    cmd += ['-e', 'INFRABOX_CLI=true']

    if args.env:
        for e in args.env:
            cmd += ['-e', e]

    if args.env_file:
        cmd += ['--env-file', args.env_file]

    if os.name != 'nt':
        cmd += ['-e', 'INFRABOX_UID=%s' % os.geteuid()]
        cmd += ['-e', 'INFRABOX_GID=%s' % os.getegid()]

    if not args.unlimited:
        # memory limit
        cmd += ['-m', '%sm' % job['resources']['limits']['memory']]

        # CPU limit
        cmd += ['--cpus', str(job['resources']['limits']['cpu'])]

    cmd.append(image_name)

    if (job['type'] == 'docker-image' or job['type'] == 'docker') and job.get(
            'command', None):
        cmd += job['command']

    logger.info("Run docker container")
    try:
        execute(cmd, cwd=args.project_root)
    except:
        # Make sure the container is stopped (best effort) before re-raising.
        try:
            execute(['docker', 'stop', container_name])
        except:
            pass
        raise

    logger.info("Commiting Container")
    execute(['docker', 'commit', container_name, image_name],
            cwd=args.project_root)
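For a made-up job the assembled command would look roughly like this (secrets, capabilities and the privileged branch left out):

# Hypothetical job 'test' with one shared directory and memory/CPU limits:
# ['docker', 'run', '--name', 'ib_test',
#  '-v', '/tmp/ib/output:/infrabox/output',
#  '-e', 'MY_VAR=value',
#  '-e', 'INFRABOX_CLI=true',
#  '-e', 'INFRABOX_UID=1000', '-e', 'INFRABOX_GID=1000',
#  '-m', '1024m', '--cpus', '1',
#  'myproject_test']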
Example #17
File: run.py Project: metti/cli
def build_and_run_docker_compose(args, job):
    compose_file = os.path.join(job['base_path'], job['docker_compose_file'])
    compose_file_new = compose_file + ".infrabox"

    # rewrite compose file
    compose_file_content = docker_compose.create_from(compose_file)
    for service in compose_file_content['services']:
        infrabox = create_infrabox_directories(args, job, service=service)
        compose_file_content['services'][service]['volumes'] = [
            "%s:/infrabox" % str(infrabox)
        ]

    with open(compose_file_new, "w+") as out:
        yaml.dump(compose_file_content, out, default_flow_style=False)

    env = {"PATH": os.environ['PATH']}

    for e in args.environment:
        s = e.split("=")
        env[s[0]] = s[1]

    if 'environment' in job:
        for name, value in job['environment'].items():
            if isinstance(value, dict):
                continue

            env[name] = value

    if args.clean:
        execute([
            'docker-compose', '-p', args.project_name, '-f', compose_file_new,
            'rm', '-f'
        ],
                env=env,
                cwd=job['base_path'])

    execute([
        'docker-compose', '-p', args.project_name, '-f', compose_file_new,
        'build'
    ],
            env=env,
            cwd=job['base_path'])

    def signal_handler(_, __):
        logger.info("Stopping docker containers")
        execute(['docker-compose', '-f', compose_file_new, 'stop'],
                env=env,
                cwd=job['base_path'])
        os.remove(compose_file_new)
        sys.exit(0)

    signal.signal(signal.SIGINT, signal_handler)

    execute([
        'docker-compose', '-p', args.project_name, '-f', compose_file_new,
        'up', '--abort-on-container-exit'
    ],
            env=env)
    signal.signal(signal.SIGINT, signal.SIG_DFL)

    os.remove(compose_file_new)