def hostcmd_run(base_path, project_name, engine_name, vars_files=None, cache=True,
                ask_vault_pass=False, **kwargs):
    """Start the project's services by delegating to the conductor's `run` command."""
    assert_initialized(base_path)
    logger.debug('Got extra args to `run` command', arguments=kwargs)
    config = get_config(base_path, vars_files=vars_files, engine_name=engine_name,
                        project_name=project_name)
    # Non-production runs use the 'dev' configuration overlay.
    if not kwargs['production']:
        config.set_env('dev')
    logger.debug('hostcmd_run configuration', config=config.__dict__)
    engine = load_engine(['RUN'], engine_name, config.project_name,
                         config['services'], **kwargs)
    remove_existing_container(engine, 'conductor', remove_volumes=True)
    conductor_params = {
        'deployment_output_path': config.deployment_path,
        'host_user_uid': os.getuid(),
        'host_user_gid': os.getgid(),
        'settings': config.get('settings', {}),
    }
    conductor_params.update(kwargs)
    logger.debug('Params passed to conductor for run', params=conductor_params)
    # Collect the vault password interactively when requested.
    if ask_vault_pass:
        conductor_params['vault_password'] = getpass.getpass(u"Enter the vault password: ")
    engine.await_conductor_command('run', dict(config), base_path, conductor_params,
                                   save_container=config.save_conductor)
def hostcmd_deploy(base_path, project_name, engine_name, vars_files=None, cache=True,
                   vault_files=None, **kwargs):
    """Push images (unless local) and hand deployment off to the conductor."""
    assert_initialized(base_path)
    config = get_config(base_path, vars_files=vars_files, engine_name=engine_name,
                        project_name=project_name, vault_files=vault_files)
    local_images = kwargs.get('local_images')
    # A CLI-supplied output path takes precedence over the configured one.
    output_path = kwargs.pop('deployment_output_path', None) or config.deployment_path
    engine = load_engine(['LOGIN', 'PUSH', 'DEPLOY'], engine_name, config.project_name,
                         config['services'], **kwargs)
    params = {
        'deployment_output_path': os.path.normpath(
            os.path.abspath(os.path.expanduser(output_path))),
        'host_user_uid': os.getuid(),
        'host_user_gid': os.getgid(),
        'settings': config.get('settings', {}),
    }
    params.update(kwargs)
    # Unless deploying images already present locally, push them first and fold
    # the registry details returned by push_images into the conductor params.
    if not local_images:
        push_options = push_images(base_path, config.image_namespace, engine, config,
                                   save_conductor=False, **params)
        params.update(push_options)
    params['vault_files'] = config.vault_files
    engine.await_conductor_command('deploy', dict(config), base_path, params,
                                   save_container=config.save_conductor)
def conductorcmd_deploy(engine_name, project_name, services, **kwargs):
    """Generate the deployment playbook, write it out, and restore host ownership."""
    uid = kwargs.get('host_user_uid', 1)
    gid = kwargs.get('host_user_gid', 1)
    engine = load_engine(['DEPLOY'], engine_name, project_name, services, **kwargs)
    logger.info(u'Engine integration loaded. Preparing deploy.',
                engine=engine.display_name)
    # Every role-built service must already have an image available.
    for service_name, service_defn in services.items():
        if not service_defn.get('roles'):
            continue
        logger.info(u'Verifying image for %s', service_name)
        if not engine.get_latest_image_id_for_service(service_name):
            msg = u'Missing image for {}. Run "ansible-container build" to (re)create it.'.format(service_name)
            logger.error(msg, service=service_name)
            raise RuntimeError(msg)
    deployment_output_path = kwargs.get('deployment_output_path')
    playbook = engine.generate_orchestration_playbook(**kwargs)
    engine.pre_deployment_setup(project_name, services, **kwargs)
    try:
        with open(os.path.join(deployment_output_path, '%s.yml' % project_name), 'w') as ofs:
            ofs.write(ruamel.yaml.round_trip_dump(playbook, indent=4, block_seq_indent=2,
                                                  default_flow_style=False))
    except OSError:
        logger.error(u'Failure writing deployment playbook', exc_info=True)
        raise
    # The conductor runs as root; give the generated files back to the host user.
    set_path_ownership(deployment_output_path, uid, gid)
def hostcmd_destroy(base_path, project_name, engine_name, vars_files=None, cache=True, **kwargs):
    """Tear down the project's containers and images via the conductor's `destroy`."""
    assert_initialized(base_path)
    logger.debug('Got extra args to `destroy` command', arguments=kwargs)
    config = get_config(base_path, vars_files=vars_files, engine_name=engine_name,
                        project_name=project_name)
    # Non-production invocations run against the 'dev' overlay.
    if not kwargs['production']:
        config.set_env('dev')
    engine = load_engine(['RUN'], engine_name, config.project_name,
                         config['services'], **kwargs)
    remove_existing_container(engine, 'conductor', remove_volumes=True)
    conductor_params = dict(
        deployment_output_path=config.deployment_path,
        host_user_uid=os.getuid(),
        host_user_gid=os.getgid(),
        settings=config.get('settings', {}),
    )
    conductor_params.update(kwargs)
    engine.await_conductor_command('destroy', dict(config), base_path, conductor_params,
                                   save_container=config.save_conductor)
def conductorcmd_deploy(engine_name, project_name, services, **kwargs):
    """Write the orchestration playbook that deploys the project's services.

    :param engine_name: engine integration to load (with the DEPLOY capability)
    :param project_name: used to name the generated ``<project>.yml`` playbook
    :param services: mapping of service name -> service definition
    :param kwargs: forwarded to the engine; must include ``deployment_output_path``
    :raises RuntimeError: when a role-built service has no image available
    """
    engine = load_engine(['DEPLOY'], engine_name, project_name, services, **kwargs)
    logger.info(u'Engine integration loaded. Preparing deploy.',
                engine=engine.display_name)
    # Verify all images are built.
    # BUG FIX: previously every service was verified, including services without
    # roles, which never produce their own image — causing a spurious failure.
    # Sibling variants of this command skip role-less services; do the same here.
    for service_name in services:
        if not services[service_name].get('roles'):
            continue
        logger.info(u'Verifying image for %s', service_name)
        image_id = engine.get_latest_image_id_for_service(service_name)
        if not image_id:
            logger.error(
                u'Missing image. Run "ansible-container build" '
                u'to (re)create it.',
                service=service_name)
            raise RuntimeError(u'Run failed.')
    logger.debug("conductorcmd_deploy", kwargs=kwargs)
    deployment_output_path = kwargs.get('deployment_output_path')
    playbook = engine.generate_orchestration_playbook(**kwargs)
    try:
        with open(
                os.path.join(deployment_output_path, '%s.yml' % project_name),
                'w') as ofs:
            ofs.write(
                ruamel.yaml.round_trip_dump(playbook,
                                            indent=4,
                                            block_seq_indent=2,
                                            default_flow_style=False))
    except OSError:
        logger.error(u'Failure writing deployment playbook', exc_info=True)
        raise
def hostcmd_push(base_path, project_name, engine_name, vars_files=None, **kwargs):
    """
    Push the project's images to a registry.

    The registry login happens before the push. When the engine's config file
    lacks an authorization entry for the registry, supply username and/or
    password; omitting the password triggers an interactive prompt.
    """
    assert_initialized(base_path)
    config = get_config(base_path, vars_files=vars_files, engine_name=engine_name,
                        project_name=project_name)
    engine = load_engine(['LOGIN', 'PUSH'], engine_name, config.project_name,
                         config['services'], **kwargs)
    logger.debug('PROJECT NAME', project_name=config.project_name)
    push_images(base_path, config.image_namespace, engine, config,
                save_conductor=config.save_conductor, **kwargs)
def hostcmd_destroy(base_path, project_name, engine_name, var_file=None, cache=True, **kwargs):
    """Stop and delete all project containers and built images via the conductor.

    :param base_path: path to the project directory
    :param project_name: project name; defaults to the base directory's name
    :param engine_name: name of the container engine integration to load
    :param var_file: optional variables file passed to get_config
    :param kwargs: extra CLI options merged into the conductor params
    """
    assert_initialized(base_path)
    logger.debug('Got extra args to `destroy` command', arguments=kwargs)
    config = get_config(base_path, var_file=var_file, engine_name=engine_name)
    engine_obj = load_engine(['RUN'], engine_name,
                             project_name or os.path.basename(base_path),
                             config['services'], **kwargs)
    remove_existing_container(engine_obj, 'conductor', remove_volumes=True)
    params = {
        'deployment_output_path': config.deployment_path,
        'host_user_uid': os.getuid(),
        'host_user_gid': os.getgid(),
    }
    if config.get('settings', {}).get('k8s_auth'):
        params['k8s_auth'] = config['settings']['k8s_auth']
    # BUG FIX: params.update(kwargs) was executed twice (once inside the guard,
    # then again unconditionally); the redundant second call is removed.
    if kwargs:
        params.update(kwargs)
    engine_obj.await_conductor_command(
        'destroy', dict(config), base_path, params,
        save_container=config.get('settings', {}).get('save_conductor_container', False))
def conductorcmd_push(engine_name, project_name, services, **kwargs):
    """Authenticate against a registry and push every role-built service image."""
    username = kwargs.pop('username')
    password = kwargs.pop('password')
    email = kwargs.pop('email')
    url = kwargs.pop('url')
    namespace = kwargs.pop('namespace')
    tag = kwargs.pop('tag')
    config_path = kwargs.pop('config_path')
    engine = load_engine(['PUSH', 'LOGIN'], engine_name, project_name, services)
    logger.info(u'Engine integration loaded. Preparing push.',
                engine=engine.display_name)
    # Logging in may resolve or refresh the credentials used for the push.
    username, password = engine.login(username, password, email, url, config_path)
    repo_data = {
        'url': url,
        'namespace': namespace or username,
        'tag': tag,
        'username': username,
        'password': password,
    }
    # Only services built from Ansible roles produce images of their own.
    for service_name, service_config in services.items():
        if service_config.get('roles'):
            image_id = engine.get_latest_image_id_for_service(service_name)
            engine.push(image_id, service_name, repo_data)
def hostcmd_restart(base_path, project_name, engine_name, force=False, services=None, **kwargs):
    """Restart the project's services through the conductor's `restart` command.

    :param base_path: path to the project directory
    :param project_name: project name; defaults to the base directory's name
    :param engine_name: name of the container engine integration to load
    :param force: accepted for CLI compatibility; not consumed here
    :param services: optional list of service names (previously a shared mutable
        default ``[]`` — now ``None``; the parameter is not read in this body)
    :param kwargs: extra CLI options merged into the conductor params
    """
    config = get_config(base_path, engine_name=engine_name)
    engine_obj = load_engine(['RUN'], engine_name,
                             project_name or os.path.basename(base_path),
                             config['services'], **kwargs)
    params = {
        'deployment_output_path': config.deployment_path,
        'host_user_uid': os.getuid(),
        'host_user_gid': os.getgid(),
    }
    if config.get('settings', {}).get('k8s_auth'):
        params['k8s_auth'] = config['settings']['k8s_auth']
    if config.get('volumes'):
        params['volumes'] = config['volumes']
    # BUG FIX: params.update(kwargs) was previously executed twice (guarded and
    # then again unconditionally); the duplicate call is removed.
    if kwargs:
        params.update(kwargs)
    engine_obj.await_conductor_command(
        'restart', dict(config), base_path, params,
        save_container=config.get('settings', {}).get('save_conductor_container', False))
def conductorcmd_deploy(engine_name, project_name, services, **kwargs):
    """Write the orchestration playbook used to deploy the project's services."""
    engine = load_engine(['DEPLOY'], engine_name, project_name, services, **kwargs)
    logger.info(u'Engine integration loaded. Preparing deploy.',
                engine=engine.display_name)
    # Verify all images are built; only services with roles have images.
    for service_name in services:
        if not services[service_name].get('roles'):
            continue
        logger.info(u'Verifying image for %s', service_name)
        if not engine.get_latest_image_id_for_service(service_name):
            logger.error(u'Missing image. Run "ansible-container build" '
                         u'to (re)create it.', service=service_name)
            raise RuntimeError(u'Run failed.')
    logger.debug("conductorcmd_deploy", kwargs=kwargs)
    deployment_output_path = kwargs.get('deployment_output_path')
    playbook = engine.generate_orchestration_playbook(**kwargs)
    target = os.path.join(deployment_output_path, '%s.yml' % project_name)
    try:
        with open(target, 'w') as ofs:
            ofs.write(ruamel.yaml.round_trip_dump(
                playbook, indent=4, block_seq_indent=2, default_flow_style=False))
    except OSError:
        logger.error(u'Failure writing deployment playbook', exc_info=True)
        raise
def hostcmd_destroy(base_path, project_name, engine_name, var_file=None, cache=True, **kwargs):
    """Stop and delete all project containers and built images via the conductor.

    :param base_path: path to the project directory
    :param project_name: project name; defaults to the base directory's name
    :param engine_name: name of the container engine integration to load
    :param var_file: optional variables file passed to get_config
    :param kwargs: extra CLI options merged into the conductor params
    """
    assert_initialized(base_path)
    logger.debug('Got extra args to `destroy` command', arguments=kwargs)
    config = get_config(base_path, var_file=var_file, engine_name=engine_name)
    engine_obj = load_engine(['RUN'], engine_name,
                             project_name or os.path.basename(base_path),
                             config['services'], **kwargs)
    remove_existing_container(engine_obj, 'conductor', remove_volumes=True)
    params = {
        'deployment_output_path': config.deployment_path,
        'host_user_uid': os.getuid(),
        'host_user_gid': os.getgid(),
    }
    if config.get('settings', {}).get('k8s_auth'):
        params['k8s_auth'] = config['settings']['k8s_auth']
    # BUG FIX: params.update(kwargs) was executed twice (guarded then again
    # unconditionally); the redundant second call is removed.
    if kwargs:
        params.update(kwargs)
    engine_obj.await_conductor_command(
        'destroy', dict(config), base_path, params,
        save_container=config.get('settings', {}).get('save_conductor_container', False))
def hostcmd_restart(base_path, project_name, engine_name, vars_files=None, force=False,
                    services=None, **kwargs):
    """Restart the project's services through the conductor's `restart` command.

    :param base_path: path to the project directory
    :param project_name: project name override; config resolves the final name
    :param engine_name: name of the container engine integration to load
    :param vars_files: optional variable files passed to get_config
    :param force: accepted for CLI compatibility; not consumed here
    :param services: optional list of service names (BUG FIX: previously
        defaulted to a shared mutable list ``[]``; ``None`` is the safe
        sentinel — the parameter is not read in this body, so behavior is
        unchanged)
    :param kwargs: extra CLI options merged into the conductor params
    """
    config = get_config(base_path, vars_files=vars_files, engine_name=engine_name,
                        project_name=project_name)
    # Non-production invocations run against the 'dev' overlay.
    if not kwargs['production']:
        config.set_env('dev')
    engine_obj = load_engine(['RUN'], engine_name, config.project_name,
                             config['services'], **kwargs)
    params = {
        'deployment_output_path': config.deployment_path,
        'host_user_uid': os.getuid(),
        'host_user_gid': os.getgid(),
        'settings': config.get('settings', {})
    }
    if kwargs:
        params.update(kwargs)
    engine_obj.await_conductor_command('restart', dict(config), base_path, params,
                                       save_container=config.save_conductor)
def hostcmd_build(base_path, project_name, engine_name, var_file=None, **kwargs):
    """(Re)build the Conductor image if needed, then run the conductor's `build`."""
    config = get_config(base_path, var_file=var_file, engine_name=engine_name)
    engine_obj = load_engine(['BUILD', 'RUN'], engine_name,
                             project_name or os.path.basename(base_path),
                             config['services'], **kwargs)
    conductor_container_id = engine_obj.get_container_id_for_service('conductor')
    conductor_image_id = engine_obj.get_latest_image_id_for_service('conductor')
    # A running conductor must be stopped before it can be replaced.
    if engine_obj.service_is_running('conductor'):
        engine_obj.stop_container(conductor_container_id, forcefully=True)
    # Rebuild unless devel mode reuses an existing conductor image.
    if conductor_image_id is None or not kwargs.get('devel'):
        #TODO once we get a conductor running, figure out how to know it's running
        if engine_obj.CAP_BUILD_CONDUCTOR:
            conductor_base = config.get('settings', {}).get('conductor_base',
                                                            DEFAULT_CONDUCTOR_BASE)
            engine_obj.build_conductor_image(base_path, conductor_base,
                                             cache=kwargs['cache'])
        else:
            logger.warning(u'%s does not support building the Conductor image.',
                           engine_obj.display_name, engine=engine_obj.display_name)
    # Remove any stale conductor container left over from a previous run.
    if conductor_container_id:
        engine_obj.delete_container(conductor_container_id)
    logger.debug('Config settings', config=config,
                 rawsettings=config.get('settings'), tconf=type(config),
                 settings=config.get('settings', {}))
    save_container = config.get('settings', {}).get('save_conductor_container', False)
    if kwargs.get('save_conductor_container'):
        # give precedence to CLI option
        save_container = True
    engine_obj.await_conductor_command('build', dict(config), base_path, kwargs,
                                       save_container=save_container)
def hostcmd_deploy(base_path, project_name, engine_name, var_file=None, cache=True, **kwargs):
    """Push the project's images (unless local) and run the conductor's `deploy`."""
    assert_initialized(base_path)
    logger.debug('Got extra args to `deploy` command', arguments=kwargs)
    config = get_config(base_path, var_file=var_file, engine_name=engine_name)
    local_images = kwargs.get('local_images')
    # A CLI-supplied output path wins over the configured deployment path.
    output_path = kwargs.pop('deployment_output_path', None) or config.deployment_path
    engine_obj = load_engine(['LOGIN', 'PUSH', 'DEPLOY'], engine_name,
                             project_name or os.path.basename(base_path),
                             config['services'], **kwargs)
    params = dict(
        deployment_output_path=os.path.normpath(os.path.expanduser(output_path)),
        host_user_uid=os.getuid(),
        host_user_gid=os.getgid(),
    )
    if config.get('settings', {}).get('k8s_auth'):
        params['k8s_auth'] = config['settings']['k8s_auth']
    if kwargs:
        params.update(kwargs)
    # Push first unless the deployment should reference local images only.
    if not local_images:
        url, namespace = push_images(base_path, engine_obj, config,
                                     save_conductor=False, **params)
        params['url'] = url
        params['namespace'] = namespace
    engine_obj.await_conductor_command(
        'deploy', dict(config), base_path, params,
        save_container=config.get('settings', {}).get('save_conductor_container', False))
def conductorcmd_push(engine_name, project_name, services, **kwargs):
    """Log in to a registry and push every image built from Ansible roles."""
    username = kwargs.pop('username')
    password = kwargs.pop('password')
    email = kwargs.pop('email')
    url = kwargs.pop('url')
    namespace = kwargs.pop('namespace')
    tag = kwargs.pop('tag')
    config_path = kwargs.pop('config_path')
    repository_prefix = kwargs.pop('repository_prefix')
    engine = load_engine(['PUSH', 'LOGIN'], engine_name, project_name, services)
    logger.info(u'Engine integration loaded. Preparing push.',
                engine=engine.display_name)
    # Verify that we can authenticate with the registry.
    username, password = engine.login(username, password, email, url, config_path)
    # The push destination/credentials are identical for every image.
    push_kwargs = dict(url=url, tag=tag, namespace=namespace, username=username,
                       password=password, repository_prefix=repository_prefix)
    for name, service in iteritems(services):
        if service.get('containers'):
            # Multi-container service: push each role-built container image.
            for c in service['containers']:
                if 'roles' in c:
                    cname = '%s-%s' % (name, c['container_name'])
                    image_id = engine.get_latest_image_id_for_service(cname)
                    engine.push(image_id, cname, **push_kwargs)
        elif 'roles' in service:
            # if the service has roles, it's an image we should push
            image_id = engine.get_latest_image_id_for_service(name)
            engine.push(image_id, name, **push_kwargs)
def conductorcmd_stop(engine_name, project_name, services, **kwargs):
    """Stop all running service containers via the orchestration playbook.

    :param engine_name: engine integration to load (RUN capability)
    :param project_name: project whose services are stopped
    :param services: mapping of service name -> service definition
    :raises AnsibleContainerException: when the stop playbook exits non-zero
    """
    engine = load_engine(['RUN'], engine_name, project_name, services, **kwargs)
    logger.info(u'Engine integration loaded. Preparing to stop all containers.',
                engine=engine.display_name)
    playbook = engine.generate_orchestration_playbook(**kwargs)
    rc = run_playbook(playbook, engine, services, tags=['stop'], **kwargs)
    # BUG FIX: the playbook return code was previously ignored and success was
    # logged unconditionally; fail loudly like the other conductor commands do.
    if rc:
        raise AnsibleContainerException(
            "Error executing the stop command. Some containers may still be running."
        )
    logger.info(u'All services stopped.', playbook_rc=rc)
def hostcmd_import(base_path, project_name, engine_name, **kwargs):
    """Import an existing project into Ansible Container form via the engine."""
    name = project_name or os.path.basename(base_path)
    engine = load_engine(['IMPORT'], engine_name, name, {}, **kwargs)
    engine.import_project(base_path, **kwargs)
    logger.info('Project imported.')
def conductorcmd_destroy(engine_name, project_name, services, **kwargs):
    """Stop and delete all service containers and their built images.

    :param engine_name: engine integration to load (RUN capability)
    :param project_name: project whose services are destroyed
    :param services: mapping of service name -> service definition
    :raises AnsibleContainerException: when the destroy playbook exits non-zero
    """
    engine = load_engine(['RUN'], engine_name, project_name, services, **kwargs)
    logger.info(u'Engine integration loaded. Preparing to stop+delete all '
                u'containers and built images.', engine=engine.display_name)
    playbook = engine.generate_orchestration_playbook(**kwargs)
    rc = run_playbook(playbook, engine, services, tags=['destroy'], **kwargs)
    # BUG FIX: the playbook return code was previously ignored and success was
    # logged unconditionally; fail loudly like the other conductor commands do.
    if rc:
        raise AnsibleContainerException(
            'Error executing the destroy command. Not all containers and images may have been removed.'
        )
    logger.info(u'All services destroyed.', playbook_rc=rc)
def conductorcmd_restart(engine_name, project_name, services, **kwargs):
    """Restart all service containers via the orchestration playbook.

    :param engine_name: engine integration to load (RUN capability)
    :param project_name: project whose services are restarted
    :param services: mapping of service name -> service definition
    :raises AnsibleContainerException: when the restart playbook exits non-zero
    """
    engine = load_engine(['RUN'], engine_name, project_name, services, **kwargs)
    logger.info(u'Engine integration loaded. Preparing to restart containers.',
                engine=engine.display_name)
    playbook = engine.generate_orchestration_playbook(**kwargs)
    rc = run_playbook(playbook, engine, services, tags=['restart'], **kwargs)
    # BUG FIX: the playbook return code was previously ignored and success was
    # logged unconditionally; fail loudly like the other conductor commands do.
    if rc:
        raise AnsibleContainerException(
            'Error executing the restart command. Not all containers may be running.'
        )
    logger.info(u'All services restarted.', playbook_rc=rc)
def hostcmd_version(base_path, project_name, engine_name, **kwargs):
    """Print host-side version details, then the engine's own version info."""
    print('Ansible Container, version', __version__)
    if kwargs.get('debug', False):
        # Extra platform/interpreter detail, useful in bug reports.
        print(u', '.join(os.uname()))
        print(sys.version, sys.executable)
    assert_initialized(base_path)
    engine = load_engine(['VERSION'], engine_name,
                         project_name or os.path.basename(base_path), {}, **kwargs)
    engine.print_version_info()
def conductorcmd_destroy(engine_name, project_name, services, **kwargs):
    """Stop and delete all service containers and their built images.

    :param engine_name: engine integration to load (RUN capability)
    :param project_name: project whose services are destroyed
    :param services: mapping of service name -> service definition
    :raises AnsibleContainerException: when the destroy playbook exits non-zero
    """
    engine = load_engine(['RUN'], engine_name, project_name, services, **kwargs)
    logger.info(
        u'Engine integration loaded. Preparing to stop+delete all '
        u'containers and built images.', engine=engine.display_name)
    playbook = engine.generate_orchestration_playbook(**kwargs)
    rc = run_playbook(playbook, engine, services, tags=['destroy'], **kwargs)
    # BUG FIX: the playbook return code was previously ignored and success was
    # logged unconditionally; fail loudly like the other conductor commands do.
    if rc:
        raise AnsibleContainerException(
            'Error executing the destroy command. Not all containers and images may have been removed.'
        )
    logger.info(u'All services destroyed.', playbook_rc=rc)
def conductorcmd_stop(engine_name, project_name, services, **kwargs):
    """Run the 'stop' orchestration tags to halt every service container."""
    engine = load_engine(['RUN'], engine_name, project_name, services, **kwargs)
    logger.info(u'Engine integration loaded. Preparing to stop all containers.',
                engine=engine.display_name)
    stop_rc = run_playbook(engine.generate_orchestration_playbook(**kwargs),
                           engine, services, tags=['stop'], **kwargs)
    # A non-zero playbook rc means some containers may not have stopped.
    if stop_rc:
        raise AnsibleContainerException(
            "Error executing the stop command. Some containers may still be running."
        )
    logger.info(u'All services stopped.', playbook_rc=stop_rc)
def conductorcmd_restart(engine_name, project_name, services, **kwargs):
    """Run the 'restart' orchestration tags to bounce every service container."""
    engine = load_engine(['RUN'], engine_name, project_name, services, **kwargs)
    logger.info(u'Engine integration loaded. Preparing to restart containers.',
                engine=engine.display_name)
    restart_rc = run_playbook(engine.generate_orchestration_playbook(**kwargs),
                              engine, services, tags=['restart'], **kwargs)
    # A non-zero playbook rc means some containers may not be back up.
    if restart_rc:
        raise AnsibleContainerException(
            'Error executing the restart command. Not all containers may be running.'
        )
    logger.info(u'All services restarted.', playbook_rc=restart_rc)
def hostcmd_install(base_path, project_name, engine_name, **kwargs):
    """Run the conductor's `install` command for the project."""
    assert_initialized(base_path)
    config = get_config(base_path, engine_name=engine_name, project_name=project_name)
    # Capture the flag before the engine is constructed.
    save_conductor = config.save_conductor
    engine = load_engine(['INSTALL'], engine_name, config.project_name,
                         config['services'], **kwargs)
    engine.await_conductor_command('install', dict(config), base_path, kwargs,
                                   save_container=save_conductor)
def conductorcmd_destroy(engine_name, project_name, services, **kwargs):
    """Run the 'destroy' tags: stop containers and remove containers + images."""
    engine = load_engine(['RUN'], engine_name, project_name, services, **kwargs)
    logger.info(u'Engine integration loaded. Preparing to stop+delete all '
                u'containers and built images.', engine=engine.display_name)
    playbook = engine.generate_orchestration_playbook(**kwargs)
    destroy_rc = run_playbook(playbook, engine, services, tags=['destroy'], **kwargs)
    # A non-zero playbook rc means cleanup may be incomplete.
    if destroy_rc:
        raise AnsibleContainerException(
            'Error executing the destroy command. Not all containers and images may have been removed.'
        )
    logger.info(u'All services destroyed.', playbook_rc=destroy_rc)
def hostcmd_install(base_path, project_name, engine_name, **kwargs):
    """Run the conductor's `install` command for the project."""
    assert_initialized(base_path)
    config = get_config(base_path, engine_name=engine_name)
    # Whether the conductor container is preserved after the command completes.
    save_conductor = config.get('settings', {}).get('save_conductor_container', False)
    engine = load_engine(['INSTALL'], engine_name,
                         project_name or os.path.basename(base_path),
                         config['services'], **kwargs)
    engine.await_conductor_command('install', dict(config), base_path, kwargs,
                                   save_container=save_conductor)
def conductorcmd_stop(engine_name, project_name, services, **kwargs):
    """Stop every service container by running the 'stop' orchestration tags."""
    engine = load_engine(['RUN'], engine_name, project_name, services, **kwargs)
    logger.info(
        u'Engine integration loaded. Preparing to stop all containers.',
        engine=engine.display_name)
    playbook = engine.generate_orchestration_playbook(**kwargs)
    rc = run_playbook(playbook, engine, services, tags=['stop'], **kwargs)
    if not rc:
        logger.info(u'All services stopped.', playbook_rc=rc)
        return
    # A non-zero playbook rc means some containers may not have stopped.
    raise AnsibleContainerException(
        "Error executing the stop command. Some containers may still be running."
    )
def hostcmd_build(base_path, project_name, engine_name, vars_files=None, **kwargs):
    """Build the project's images: (re)build the Conductor image when needed,
    then delegate the service-image builds to the conductor's `build` command.

    :param base_path: path to the project directory
    :param project_name: project name override; config resolves the final name
    :param engine_name: name of the container engine integration to load
    :param vars_files: optional variable files passed to get_config
    :param kwargs: CLI options; must include 'cache', 'conductor_cache' and
        'container_cache'; forwarded (mutated) to the conductor
    """
    # Conductor-image caching requires both the global and conductor cache flags.
    conductor_cache = kwargs['cache'] and kwargs['conductor_cache']
    config = get_config(base_path, vars_files=vars_files, engine_name=engine_name,
                        project_name=project_name)
    engine_obj = load_engine(['BUILD', 'RUN'], engine_name, config.project_name,
                             config['services'], **kwargs)
    conductor_container_id = engine_obj.get_container_id_for_service('conductor')
    conductor_image_id = engine_obj.get_latest_image_id_for_service('conductor')
    # A running conductor must be stopped before it can be rebuilt/replaced.
    if engine_obj.service_is_running('conductor'):
        engine_obj.stop_container(conductor_container_id, forcefully=True)
    # Rebuild the conductor image unless devel mode reuses an existing one.
    if conductor_image_id is None or not kwargs.get('devel'):
        #TODO once we get a conductor running, figure out how to know it's running
        if engine_obj.CAP_BUILD_CONDUCTOR:
            # Collect KEY=VALUE environment entries for the conductor build.
            # settings.conductor.environment may be a dict or already a list.
            env_vars = []
            if config.get('settings', {}).get('conductor', {}).get('environment', {}):
                environment = config['settings']['conductor']['environment']
                if isinstance(environment, dict):
                    for key, value in iteritems(environment):
                        env_vars.append('{}={}'.format(key, value))
                else:
                    env_vars = environment
            # CLI-supplied variables are appended after config-sourced ones.
            if kwargs.get('with_variables'):
                env_vars += kwargs['with_variables']
            engine_obj.build_conductor_image(
                base_path,
                config.conductor_base,
                cache=conductor_cache,
                environment=env_vars
            )
        else:
            logger.warning(u'%s does not support building the Conductor image.',
                           engine_obj.display_name, engine=engine_obj.display_name)
    # Remove any stale conductor container left over from a previous run.
    if conductor_container_id:
        engine_obj.delete_container(conductor_container_id)
    logger.debug('Config settings', config=config,
                 rawsettings=config.get('settings'), conf=type(config),
                 settings=config.get('settings', {}))
    save_container = config.save_conductor
    if kwargs.get('save_conductor_container'):
        # give precedence to CLI option
        save_container = True
    # Service-image caching requires both the global and container cache flags;
    # the conductor sees the combined value via kwargs.
    kwargs['cache'] = kwargs['cache'] and kwargs['container_cache']
    kwargs['config_vars'] = config.get('defaults')
    engine_obj.await_conductor_command(
        'build', dict(config), base_path, kwargs, save_container=save_container)
def conductorcmd_destroy(engine_name, project_name, services, **kwargs):
    """Stop and delete all service containers and built images ('destroy' tags)."""
    engine = load_engine(['RUN'], engine_name, project_name, services, **kwargs)
    logger.info(
        u'Engine integration loaded. Preparing to stop+delete all '
        u'containers and built images.', engine=engine.display_name)
    rc = run_playbook(engine.generate_orchestration_playbook(**kwargs),
                      engine, services, tags=['destroy'], **kwargs)
    if not rc:
        logger.info(u'All services destroyed.', playbook_rc=rc)
        return
    # A non-zero playbook rc means cleanup may be incomplete.
    raise AnsibleContainerException(
        'Error executing the destroy command. Not all containers and images may have been removed.'
    )
def hostcmd_build(base_path, project_name, engine_name, var_file=None, **kwargs):
    """Rebuild the Conductor image when required, then run the conductor `build`."""
    config = get_config(base_path, var_file=var_file, engine_name=engine_name)
    engine = load_engine(['BUILD', 'RUN'], engine_name,
                         project_name or os.path.basename(base_path),
                         config['services'], **kwargs)
    conductor_container_id = engine.get_container_id_for_service('conductor')
    conductor_image_id = engine.get_latest_image_id_for_service('conductor')
    # The conductor cannot be replaced while it is still running.
    if engine.service_is_running('conductor'):
        engine.stop_container(conductor_container_id, forcefully=True)
    if conductor_image_id is None or not kwargs.get('devel'):
        #TODO once we get a conductor running, figure out how to know it's running
        if engine.CAP_BUILD_CONDUCTOR:
            base_image = config.get('settings', {}).get('conductor_base',
                                                        DEFAULT_CONDUCTOR_BASE)
            engine.build_conductor_image(base_path, base_image, cache=kwargs['cache'])
        else:
            logger.warning(u'%s does not support building the Conductor image.',
                           engine.display_name, engine=engine.display_name)
    # Drop the stale conductor container from the previous invocation, if any.
    if conductor_container_id:
        engine.delete_container(conductor_container_id)
    logger.debug('Config settings', config=config,
                 rawsettings=config.get('settings'), tconf=type(config),
                 settings=config.get('settings', {}))
    save_container = config.get('settings', {}).get('save_conductor_container', False)
    if kwargs.get('save_conductor_container'):
        # give precedence to CLI option
        save_container = True
    engine.await_conductor_command('build', dict(config), base_path, kwargs,
                                   save_container=save_container)
def hostcmd_deploy(base_path, project_name, engine_name, vars_files=None, cache=True,
                   vault_files=None, **kwargs):
    """Push the project's images (unless deploying local ones) and run `deploy`."""
    assert_initialized(base_path)
    config = get_config(base_path, vars_files=vars_files, engine_name=engine_name,
                        project_name=project_name, vault_files=vault_files)
    local_images = kwargs.get('local_images')
    output_path = kwargs.pop('deployment_output_path', None) or config.deployment_path
    # Normalize to an absolute, user-expanded path for the conductor.
    resolved_output = os.path.normpath(os.path.abspath(os.path.expanduser(output_path)))
    engine_obj = load_engine(['LOGIN', 'PUSH', 'DEPLOY'], engine_name,
                             config.project_name, config['services'], **kwargs)
    params = {
        'deployment_output_path': resolved_output,
        'host_user_uid': os.getuid(),
        'host_user_gid': os.getgid(),
        'settings': config.get('settings', {}),
    }
    if kwargs:
        params.update(kwargs)
    if not local_images:
        # push_images returns registry options the conductor needs for deploy.
        params.update(push_images(base_path, config.image_namespace, engine_obj,
                                  config, save_conductor=False, **params))
    params['vault_files'] = config.vault_files
    engine_obj.await_conductor_command('deploy', dict(config), base_path, params,
                                       save_container=config.save_conductor)
def conductorcmd_run(engine_name, project_name, services, **kwargs):
    """Launch the project's services with the 'start' orchestration tags."""
    engine = load_engine(['RUN'], engine_name, project_name, services, **kwargs)
    logger.info(u'Engine integration loaded. Preparing run.',
                engine=engine.display_name)
    # Confirm images exist for every role-built service before starting.
    role_built = [service for service, service_desc in services.items()
                  if service_desc.get('roles')]
    engine.containers_built_for_services(role_built)
    logger.debug("In conductorcmd_run", kwargs=kwargs)
    playbook = engine.generate_orchestration_playbook(**kwargs)
    logger.debug("in conductorcmd_run", playbook=playbook)
    rc = run_playbook(playbook, engine, services, tags=['start'], **kwargs)
    logger.info(u'All services running.', playbook_rc=rc)
def conductorcmd_run(engine_name, project_name, services, **kwargs):
    """Start the project's services via the 'start' orchestration tags."""
    engine = load_engine(['RUN'], engine_name, project_name, services, **kwargs)
    logger.info(u'Engine integration loaded. Preparing run.',
                engine=engine.display_name)
    # Confirm images exist for every service that is built from roles.
    role_services = []
    for service, service_desc in services.items():
        if service_desc.get('roles'):
            role_services.append(service)
    engine.containers_built_for_services(role_services)
    logger.debug("In conductorcmd_run", kwargs=kwargs)
    playbook = engine.generate_orchestration_playbook(**kwargs)
    logger.debug("in conductorcmd_run", playbook=playbook)
    rc = run_playbook(playbook, engine, services, tags=['start'], **kwargs)
    logger.info(u'All services running.', playbook_rc=rc)
def hostcmd_deploy(base_path, project_name, engine_name, var_file=None, cache=True, **kwargs):
    """Push images (unless deploying local ones) and run the conductor `deploy`."""
    assert_initialized(base_path)
    logger.debug('Got extra args to `deploy` command', arguments=kwargs)
    config = get_config(base_path, var_file=var_file, engine_name=engine_name)
    local_images = kwargs.get('local_images')
    output_path = kwargs.pop('deployment_output_path', None) or config.deployment_path
    engine_obj = load_engine(['LOGIN', 'PUSH', 'DEPLOY'], engine_name,
                             project_name or os.path.basename(base_path),
                             config['services'], **kwargs)
    params = {
        'deployment_output_path': os.path.normpath(os.path.expanduser(output_path)),
        'host_user_uid': os.getuid(),
        'host_user_gid': os.getgid(),
    }
    # Forward optional kubernetes auth and volume definitions when configured.
    if config.get('settings', {}).get('k8s_auth'):
        params['k8s_auth'] = config['settings']['k8s_auth']
    if config.get('volumes'):
        params['volumes'] = config['volumes']
    if kwargs:
        params.update(kwargs)
    if not local_images:
        # Push first; the registry url/namespace feed into the deploy params.
        url, namespace = push_images(base_path, engine_obj, config,
                                     save_conductor=False, **params)
        params['url'] = url
        params['namespace'] = namespace
    engine_obj.await_conductor_command(
        'deploy', dict(config), base_path, params,
        save_container=config.get('settings', {}).get('save_conductor_container', False))
def conductorcmd_run(engine_name, project_name, services, **kwargs):
    """Start all services with the 'start' tags; raise if the playbook fails."""
    engine = load_engine(['RUN'], engine_name, project_name, services, **kwargs)
    logger.info(u'Engine integration loaded. Preparing run.',
                engine=engine.display_name)
    # Images must exist for every role-built service before starting.
    role_built = [service for service, service_desc in services.items()
                  if service_desc.get('roles')]
    engine.containers_built_for_services(role_built)
    run_rc = run_playbook(engine.generate_orchestration_playbook(**kwargs),
                          engine, services, tags=['start'], **kwargs)
    if run_rc:
        raise AnsibleContainerException(
            'Error executing the run command. Not all containers may be running.'
        )
    logger.info(u'All services running.', playbook_rc=run_rc)
def conductorcmd_push(engine_name, project_name, services, **kwargs):
    """Authenticate with a registry and push all images built from Ansible roles."""
    username = kwargs.pop('username')
    password = kwargs.pop('password')
    email = kwargs.pop('email')
    url = kwargs.pop('url')
    namespace = kwargs.pop('namespace')
    tag = kwargs.pop('tag')
    config_path = kwargs.pop('config_path')
    repository_prefix = kwargs.pop('repository_prefix')
    engine = load_engine(['PUSH', 'LOGIN'], engine_name, project_name, services)
    logger.info(u'Engine integration loaded. Preparing push.',
                engine=engine.display_name)
    # Logging in verifies (and may refresh) the registry credentials.
    username, password = engine.login(username, password, email, url, config_path)
    for name, service in iteritems(services):
        containers = service.get('containers')
        if containers:
            # Multi-container service: push each container that has roles.
            for c in containers:
                if 'roles' not in c:
                    continue
                cname = '%s-%s' % (name, c['container_name'])
                image_id = engine.get_latest_image_id_for_service(cname)
                engine.push(image_id, cname, url=url, tag=tag, namespace=namespace,
                            username=username, password=password,
                            repository_prefix=repository_prefix)
        elif 'roles' in service:
            # A service with roles is itself an image to push.
            image_id = engine.get_latest_image_id_for_service(name)
            engine.push(image_id, name, url=url, tag=tag, namespace=namespace,
                        username=username, password=password,
                        repository_prefix=repository_prefix)
def hostcmd_push(base_path, project_name, engine_name, var_file=None, **kwargs):
    """
    Push the project's built images to a registry.

    Authentication happens before the push: if the engine's config file has no
    authorization entry for the registry, pass username and/or password;
    omitting the password triggers an interactive prompt.
    """
    assert_initialized(base_path)
    config = get_config(base_path, var_file=var_file, engine_name=engine_name)
    engine = load_engine(['LOGIN', 'PUSH'], engine_name,
                         project_name or os.path.basename(base_path),
                         config['services'], **kwargs)
    save_conductor = config.get('settings', {}).get('save_conductor_container', False)
    push_images(base_path, engine, config, save_conductor=save_conductor, **kwargs)
def hostcmd_prebake(distros, debug=False, cache=True, ignore_errors=False):
    """Pre-build Conductor base images for the given distros (docker engine only).

    :param distros: distros to prebake; falls back to PREBAKED_DISTROS when empty
    :param debug: forwarded to load_engine
    :param cache: whether the image build may use the engine's layer cache
    :param ignore_errors: when True, log a failure and continue with the next
        distro; when False, the failure propagates
    """
    logger.info('Prebaking distros...', distros=distros, cache=cache)
    engine_obj = load_engine(['BUILD_CONDUCTOR'], 'docker', os.getcwd(), {},
                             debug=debug)
    from .docker.engine import PREBAKED_DISTROS
    for distro in (distros or PREBAKED_DISTROS):
        logger.info('Now prebaking Conductor image for %s', distro)
        try:
            engine_obj.build_conductor_image(os.getcwd(), distro,
                                             prebaking=True, cache=cache)
        except KeyboardInterrupt:
            # BUG FIX: Ctrl-C was previously swallowed even when ignore_errors
            # was False, making the loop uninterruptible; now it propagates.
            if ignore_errors:
                continue
            raise
        except Exception:
            logger.exception('Failure building prebaked image for %s', distro)
            # BUG FIX: with ignore_errors False the handler previously fell
            # through and kept looping anyway; re-raise so failures are fatal.
            if ignore_errors:
                continue
            raise
def conductorcmd_run(engine_name, project_name, services, **kwargs):
    """Start the project's services via the orchestration playbook."""
    engine = load_engine(['RUN'], engine_name, project_name, services, **kwargs)
    logger.info(u'Engine integration loaded. Preparing run.',
                engine=engine.display_name)

    # Only services carrying roles correspond to images we built ourselves.
    role_services = []
    for svc_name, svc_desc in services.items():
        if svc_desc.get('roles'):
            role_services.append(svc_name)
    engine.containers_built_for_services(role_services)

    playbook = engine.generate_orchestration_playbook(**kwargs)
    exit_code = run_playbook(playbook, engine, services, tags=['start'],
                             **kwargs)
    if exit_code:
        raise AnsibleContainerException(
            'Error executing the run command. Not all containers may be running.'
        )
    logger.info(u'All services running.', playbook_rc=exit_code)
def hostcmd_run(base_path, project_name, engine_name, vars_files=None,
                cache=True, ask_vault_pass=False, **kwargs):
    """Launch the project's services on the host through the Conductor."""
    assert_initialized(base_path)
    logger.debug('Got extra args to `run` command', arguments=kwargs)

    config = get_config(base_path, vars_files=vars_files,
                        engine_name=engine_name, project_name=project_name)
    # Outside production, overlay the 'dev' environment onto the config.
    if not kwargs['production']:
        config.set_env('dev')
    logger.debug('hostcmd_run configuration', config=config.__dict__)

    engine_obj = load_engine(['RUN'], engine_name, config.project_name,
                             config['services'], **kwargs)
    # A stale conductor container would shadow the one about to start.
    remove_existing_container(engine_obj, 'conductor', remove_volumes=True)

    params = {
        'deployment_output_path': config.deployment_path,
        'host_user_uid': os.getuid(),
        'host_user_gid': os.getgid(),
        'settings': config.get('settings', {}),
    }
    params.update(kwargs)
    logger.debug('Params passed to conductor for run', params=params)

    if ask_vault_pass:
        params['vault_password'] = getpass.getpass(
            u"Enter the vault password: ")

    engine_obj.await_conductor_command('run', dict(config), base_path, params,
                                       save_container=config.save_conductor)
def hostcmd_restart(base_path, project_name, engine_name, vars_files=None,
                    force=False, services=[], **kwargs):
    """Restart the project's services through the Conductor."""
    config = get_config(base_path, vars_files=vars_files,
                        engine_name=engine_name, project_name=project_name)
    # Non-production runs get the 'dev' configuration overlay.
    if not kwargs['production']:
        config.set_env('dev')
    engine_obj = load_engine(['RUN'], engine_name, config.project_name,
                             config['services'], **kwargs)
    conductor_params = dict(
        deployment_output_path=config.deployment_path,
        host_user_uid=os.getuid(),
        host_user_gid=os.getgid(),
        settings=config.get('settings', {}),
    )
    conductor_params.update(kwargs)
    engine_obj.await_conductor_command(
        'restart', dict(config), base_path, conductor_params,
        save_container=config.save_conductor)
def hostcmd_restart(base_path, project_name, engine_name, force=False,
                    services=None, **kwargs):
    """Restart services through the Conductor (no vars-files variant).

    :param base_path: project directory
    :param project_name: explicit project name; falls back to the directory
        name of ``base_path``
    :param engine_name: container engine integration to load
    :param force: accepted for CLI compatibility; not used in this body
    :param services: accepted for CLI compatibility; not used in this body
        (default changed from a mutable ``[]`` to ``None`` — behaviorally
        identical since the value is never read)
    """
    config = get_config(base_path, engine_name=engine_name)
    engine_obj = load_engine(['RUN'], engine_name,
                             project_name or os.path.basename(base_path),
                             config['services'], **kwargs)
    params = {
        'deployment_output_path': config.deployment_path,
        'host_user_uid': os.getuid(),
        'host_user_gid': os.getgid(),
    }
    # Forward Kubernetes auth settings when the project defines them.
    if config.get('settings', {}).get('k8s_auth'):
        params['k8s_auth'] = config['settings']['k8s_auth']
    # BUG FIX: kwargs were previously merged twice (once behind `if kwargs:`
    # and once unconditionally); a single unconditional update is equivalent.
    params.update(kwargs)
    engine_obj.await_conductor_command(
        'restart', dict(config), base_path, params,
        save_container=config.get('settings', {}).get(
            'save_conductor_container', False))
def conductorcmd_deploy(engine_name, project_name, services, **kwargs):
    """Render the orchestration playbook and write it to the deployment path.

    Verifies every role-bearing service has a built image first; the written
    file is re-owned to the host user since the conductor typically runs as
    root.

    :raises RuntimeError: when a role-bearing service has no built image
    """
    # Fall back to uid/gid 1 when the host identifiers were not forwarded.
    uid, gid = kwargs.get('host_user_uid', 1), kwargs.get('host_user_gid', 1)
    engine = load_engine(['DEPLOY'], engine_name, project_name, services,
                         **kwargs)
    logger.info(u'Engine integration loaded. Preparing deploy.',
                engine=engine.display_name)
    # Verify all images are built
    for service_name, service_defn in services.items():
        if service_defn.get('roles'):
            logger.info(u'Verifying image for %s', service_name)
            image_id = engine.get_latest_image_id_for_service(service_name)
            if not image_id:
                msg = u'Missing image for {}. Run "ansible-container build" to (re)create it.'.format(
                    service_name)
                logger.error(msg, service=service_name)
                raise RuntimeError(msg)
    deployment_output_path = kwargs.get('deployment_output_path')
    playbook = engine.generate_orchestration_playbook(**kwargs)
    engine.pre_deployment_setup(project_name, services, **kwargs)
    try:
        # Playbook is written as <project_name>.yml in the output directory.
        with open(
                os.path.join(deployment_output_path, '%s.yml' % project_name),
                'w') as ofs:
            ofs.write(
                ruamel.yaml.round_trip_dump(playbook, indent=4,
                                            block_seq_indent=2,
                                            default_flow_style=False))
    except OSError:
        logger.error(u'Failure writing deployment playbook', exc_info=True)
        raise
    # Hand ownership of the generated files back to the invoking host user.
    set_path_ownership(deployment_output_path, uid, gid)
def hostcmd_destroy(base_path, project_name, engine_name, vars_files=None,
                    cache=True, **kwargs):
    """Tear down the project's services via the Conductor."""
    assert_initialized(base_path)
    logger.debug('Got extra args to `destroy` command', arguments=kwargs)

    config = get_config(base_path, vars_files=vars_files,
                        engine_name=engine_name, project_name=project_name)
    # Non-production invocations use the 'dev' configuration overlay.
    if not kwargs['production']:
        config.set_env('dev')

    engine_obj = load_engine(['RUN'], engine_name, config.project_name,
                             config['services'], **kwargs)
    # Clear out any previous conductor before starting a fresh one.
    remove_existing_container(engine_obj, 'conductor', remove_volumes=True)

    conductor_params = dict(
        deployment_output_path=config.deployment_path,
        host_user_uid=os.getuid(),
        host_user_gid=os.getgid(),
        settings=config.get('settings', {}),
    )
    conductor_params.update(kwargs)
    engine_obj.await_conductor_command(
        'destroy', dict(config), base_path, conductor_params,
        save_container=config.save_conductor)
def conductorcmd_build(engine_name, project_name, services, cache=True,
                       local_python=False, ansible_options='', debug=False,
                       config_vars=None, **kwargs):
    """Build each requested service's image by applying its roles in layers.

    For every service in ``services_to_build`` (default: all), the base
    image named by the service's ``from`` is located (pulled if missing),
    then each role is applied inside a throwaway container and committed as
    a layer keyed by a fingerprint of the base image plus the roles so far,
    enabling layer-level caching.

    :param engine_name: container engine integration to load
    :param project_name: project being built
    :param services: mapping of service name -> service definition
    :param cache: when False, cached layers are ignored entirely
    :param local_python: use the target image's own Python instead of
        mounting the conductor's runtime under /_usr and /_lib
    :param ansible_options: extra options forwarded to the role apply step
    :param debug: forwarded to the role apply step
    :param config_vars: variables passed to the roles being applied
    :raises AnsibleContainerConfigException: if a service lacks 'from'
    :raises AnsibleContainerException: if the base image cannot be found
    :raises RuntimeError: if applying a role fails
    """
    engine = load_engine(['BUILD'], engine_name, project_name, services,
                         **kwargs)
    logger.info(u'%s integration engine loaded. Build starting.',
                engine.display_name, project=project_name)
    services_to_build = kwargs.get('services_to_build') or services.keys()
    for service_name, service in services.items():
        if service_name not in services_to_build:
            logger.debug('Skipping service %s...', service_name)
            continue
        logger.info(u'Building service...', service=service_name,
                    project=project_name)
        if not service.get('from'):
            raise AnsibleContainerConfigException(
                "Expecting service to have 'from' attribute. None found when evaluating "
                "service: {}.".format(service_name))
        cur_image_id = engine.get_image_id_by_tag(service['from'])
        if not cur_image_id:
            cur_image_id = engine.pull_image_by_tag(service['from'])
            if not cur_image_id:
                # BUG FIX: the message has two '{}' placeholders but was
                # formatted with a single argument, so this raise previously
                # failed with IndexError instead of the intended exception.
                raise AnsibleContainerException(
                    "Failed to find image {}. Try `docker image pull {}`".format(
                        service['from'], service['from']))
        # the fingerprint hash tracks cacheability
        # NOTE(review): sha256() requires bytes on Python 3; the file's idioms
        # (iteritems, u'' literals) suggest Python 2 — confirm before porting.
        fingerprint_hash = hashlib.sha256('%s::' % cur_image_id)
        logger.debug(u'Base fingerprint hash = %s',
                     fingerprint_hash.hexdigest(), service=service_name,
                     hash=fingerprint_hash.hexdigest())
        cache_busted = not cache
        # Remove any leftover build container for this service.
        cur_container_id = engine.get_container_id_for_service(service_name)
        if cur_container_id:
            if engine.service_is_running(service_name):
                engine.stop_container(cur_container_id, forcefully=True)
            engine.delete_container(cur_container_id)
        if service.get('roles'):
            for role in service['roles']:
                # Each role extends the fingerprint; a matching cached layer
                # means the role's work can be reused wholesale.
                role_fingerprint = get_role_fingerprint(role)
                fingerprint_hash.update(role_fingerprint)
                if not cache_busted:
                    logger.debug(u'Still trying to keep cache.',
                                 service=service_name)
                    cached_image_id = engine.get_image_id_by_fingerprint(
                        fingerprint_hash.hexdigest())
                    if cached_image_id:
                        # We can reuse the cached image
                        logger.debug(u'Cached layer found for service',
                                     service=service_name,
                                     fingerprint=fingerprint_hash.hexdigest())
                        cur_image_id = cached_image_id
                        logger.info(u'Applied role %s from cache', role,
                                    service=service_name, role=role)
                        continue
                    else:
                        # Once one layer misses, all later layers must rebuild.
                        cache_busted = True
                        logger.debug(u'Cache busted! No layer found',
                                     service=service_name,
                                     fingerprint=fingerprint_hash.hexdigest())
                # Keep the build container alive with a sleep loop so roles
                # can be applied to it.
                run_kwargs = dict(
                    name=engine.container_name_for_service(service_name),
                    user='******',
                    working_dir='/',
                    command='sh -c "while true; do sleep 1; '
                            'done"',
                    entrypoint=[],
                    privileged=True,
                    volumes=dict(),
                    environment=dict(ANSIBLE_CONTAINER=1))
                if service.get('volumes'):
                    for volume in service['volumes']:
                        # Parse "src[:bind[:mode]]" volume specs.
                        pieces = volume.split(':')
                        src = pieces[0]
                        bind = pieces[0]
                        mode = 'rw'
                        if len(pieces) > 1:
                            bind = pieces[1]
                        if len(pieces) > 2:
                            mode = pieces[2]
                        run_kwargs[u'volumes'][src] = {
                            u'bind': bind,
                            u'mode': mode
                        }
                if not local_python:
                    # If we're on a debian based distro, we need the correct
                    # architecture to allow python to load dynamically loaded
                    # shared libraries
                    extra_library_paths = ''
                    try:
                        architecture = subprocess.check_output(
                            ['dpkg-architecture', '-qDEB_HOST_MULTIARCH'])
                        architecture = architecture.strip()
                        logger.debug(u'Detected architecture %s', architecture,
                                     service=service_name,
                                     architecture=architecture)
                        extra_library_paths = (
                            ':/_usr/lib/{0}:/_usr/local/lib/{0}'
                            ':/_lib/{0}').format(architecture)
                    except Exception:
                        # we're not on debian/ubuntu or a system without
                        # multiarch support
                        pass
                    # Use the conductor's Python runtime
                    run_kwargs['volumes'][engine.get_runtime_volume_id(
                        '/usr')] = {
                        'bind': '/_usr',
                        'mode': 'ro'
                    }
                    try:
                        run_kwargs['volumes'][engine.get_runtime_volume_id(
                            '/lib')] = {
                            'bind': '/_lib',
                            'mode': 'ro'
                        }
                        extra_library_paths += ":/_lib"
                    except ValueError:
                        # No /lib volume
                        pass
                    run_kwargs['environment'].update(
                        dict(
                            LD_LIBRARY_PATH=
                            '/usr/lib:/usr/lib64:/_usr/lib:/_usr/lib64:/_usr/local/lib{}'
                            .format(extra_library_paths),
                            CPATH=
                            '/usr/include:/usr/local/include:/_usr/include:/_usr/local/include',
                            PATH='/usr/local/sbin:/usr/local/bin:'
                                 '/usr/sbin:/usr/bin:/sbin:/bin:'
                                 '/_usr/sbin:/_usr/bin:'
                                 '/_usr/local/sbin:/_usr/local/bin',
                            # PYTHONPATH='/_usr/lib/python2.7'
                        ))
                container_id = engine.run_container(cur_image_id,
                                                    service_name, **run_kwargs)
                # Busy-wait until the engine reports the container is up.
                while not engine.service_is_running(service_name):
                    time.sleep(0.2)
                logger.debug('Container running', id=container_id)
                rc = apply_role_to_container(role, container_id, service_name,
                                             engine, vars=config_vars,
                                             local_python=local_python,
                                             ansible_options=ansible_options,
                                             debug=debug)
                logger.debug('Playbook run finished.', exit_code=rc)
                if rc:
                    raise RuntimeError('Build failed.')
                logger.info(u'Applied role to service', service=service_name,
                            role=role)
                engine.stop_container(container_id, forcefully=True)
                is_last_role = role is service['roles'][-1]
                if is_last_role and kwargs.get('flatten'):
                    # Squash all layers into a single image for the service.
                    logger.debug("Finished build, flattening image")
                    image_id = engine.flatten_container(
                        container_id, service_name, service)
                    logger.info(u'Saved flattened image for service',
                                service=service_name, image=image_id)
                else:
                    image_id = engine.commit_role_as_layer(
                        container_id, service_name,
                        fingerprint_hash.hexdigest(), service,
                        with_name=is_last_role)
                    logger.info(u'Committed layer as image',
                                service=service_name, image=image_id)
                engine.delete_container(container_id)
                cur_image_id = image_id
            # Tag the image also as latest:
            engine.tag_image_as_latest(service_name, cur_image_id)
            logger.info(u'Build complete.', service=service_name)
        else:
            logger.info(u'Service had no roles specified. Nothing to do.',
                        service=service_name)
    logger.info(u'All images successfully built.')
def conductorcmd_build(engine_name, project_name, services, cache=True,
                       local_python=False, ansible_options='', debug=False,
                       **kwargs):
    """Build each requested service's image by applying its roles in layers.

    Variant that can either use the service image's own Python
    (``local_python=True``) or mount the conductor's runtime under
    ``/_usr``/``/_lib`` and point LD_LIBRARY_PATH/CPATH/PATH/PYTHONPATH at it.
    Role variables come from ``service['defaults']``.
    """
    engine = load_engine(['BUILD'], engine_name, project_name, services,
                         **kwargs)
    logger.info(u'%s integration engine loaded. Build starting.',
                engine.display_name, project=project_name)
    # Default to building every service unless a subset was requested.
    services_to_build = kwargs.get('services_to_build') or services.keys()
    for service_name, service in services.items():
        if service_name not in services_to_build:
            logger.debug('Skipping service %s...', service_name)
            continue
        logger.info(u'Building service...', service=service_name,
                    project=project_name)
        cur_image_id = engine.get_image_id_by_tag(service['from'])
        # the fingerprint hash tracks cacheability
        fingerprint_hash = hashlib.sha256('%s::' % cur_image_id)
        logger.debug(u'Base fingerprint hash = %s',
                     fingerprint_hash.hexdigest(), service=service_name,
                     hash=fingerprint_hash.hexdigest())
        cache_busted = not cache
        # Remove any leftover build container for this service.
        cur_container_id = engine.get_container_id_for_service(service_name)
        if cur_container_id:
            if engine.service_is_running(service_name):
                engine.stop_container(cur_container_id, forcefully=True)
            engine.delete_container(cur_container_id)
        if service.get('roles'):
            for role in service['roles']:
                # Each role extends the fingerprint; a matching cached layer
                # lets us skip applying the role entirely.
                role_fingerprint = get_role_fingerprint(role)
                fingerprint_hash.update(role_fingerprint)
                if not cache_busted:
                    logger.debug(u'Still trying to keep cache.',
                                 service=service_name)
                    cached_image_id = engine.get_image_id_by_fingerprint(
                        fingerprint_hash.hexdigest())
                    if cached_image_id:
                        # We can reuse the cached image
                        logger.debug(u'Cached layer found for service',
                                     service=service_name,
                                     fingerprint=fingerprint_hash.hexdigest())
                        cur_image_id = cached_image_id
                        logger.info(u'Applied role %s from cache', role,
                                    service=service_name, role=role)
                        continue
                    else:
                        # Once a layer misses, all later layers must rebuild.
                        cache_busted = True
                        logger.debug(u'Cache busted! No layer found',
                                     service=service_name,
                                     fingerprint=fingerprint_hash.hexdigest(),
                                     )
                # Keep the build container alive with a sleep loop so the
                # role can be applied to it.
                run_kwargs = dict(
                    name=engine.container_name_for_service(service_name),
                    user='******',
                    working_dir='/',
                    command='sh -c "while true; do sleep 1; '
                            'done"',
                    entrypoint=[],
                    privileged=True,
                    volumes=dict()
                )
                if service.get('volumes'):
                    for volume in service['volumes']:
                        # Parse "src[:bind[:mode]]" volume specs.
                        pieces = volume.split(':')
                        src = pieces[0]
                        bind = pieces[0]
                        mode = 'rw'
                        if len(pieces) > 1:
                            bind = pieces[1]
                        if len(pieces) > 2:
                            mode = pieces[2]
                        run_kwargs[u'volumes'][src] = {u'bind': bind,
                                                       u'mode': mode}
                if not local_python:
                    # If we're on a debian based distro, we need the correct
                    # architecture to allow python to load dynamically loaded
                    # shared libraries
                    extra_library_paths = ''
                    try:
                        architecture = subprocess.check_output(
                            ['dpkg-architecture', '-qDEB_HOST_MULTIARCH'])
                        architecture = architecture.strip()
                        logger.debug(u'Detected architecture %s', architecture,
                                     service=service_name,
                                     architecture=architecture)
                        extra_library_paths = (
                            ':/_usr/lib/{0}:/_usr/local/lib/{0}'
                            ':/_lib/{0}').format(architecture)
                    except Exception:
                        # we're not on debian/ubuntu or a system without
                        # multiarch support
                        pass
                    # Use the conductor's Python runtime
                    # NOTE(review): this assignment REPLACES any service-declared
                    # volumes collected above rather than adding to them —
                    # looks unintentional (a sibling variant updates in place);
                    # confirm before changing.
                    run_kwargs['volumes'] = {
                        engine.get_runtime_volume_id('/usr'): {
                            'bind': '/_usr', 'mode': 'ro'}}
                    try:
                        run_kwargs['volumes'][
                            engine.get_runtime_volume_id('/lib')] = {
                            'bind': '/_lib', 'mode': 'ro'}
                        extra_library_paths += ":/_lib"
                    except ValueError:
                        # No /lib volume
                        pass
                    run_kwargs['environment'] = dict(
                        LD_LIBRARY_PATH='/_usr/lib:/_usr/lib64:/_usr/local/lib{}'.format(
                            extra_library_paths),
                        CPATH='/_usr/include:/_usr/local/include',
                        PATH='/usr/local/sbin:/usr/local/bin:'
                             '/usr/sbin:/usr/bin:/sbin:/bin:'
                             '/_usr/sbin:/_usr/bin:'
                             '/_usr/local/sbin:/_usr/local/bin',
                        PYTHONPATH='/_usr/lib/python2.7')
                container_id = engine.run_container(cur_image_id, service_name,
                                                    **run_kwargs)
                # Busy-wait until the engine reports the container is up.
                while not engine.service_is_running(service_name):
                    time.sleep(0.2)
                logger.debug('Container running', id=container_id)
                rc = apply_role_to_container(role, container_id, service_name,
                                             engine,
                                             vars=dict(service['defaults']),
                                             local_python=local_python,
                                             ansible_options=ansible_options,
                                             debug=debug)
                logger.debug('Playbook run finished.', exit_code=rc)
                if rc:
                    raise RuntimeError('Build failed.')
                logger.info(u'Applied role to service', service=service_name,
                            role=role)
                engine.stop_container(container_id, forcefully=True)
                is_last_role = role is service['roles'][-1]
                if is_last_role and kwargs.get('flatten'):
                    # Squash all layers into a single image for the service.
                    logger.debug("Finished build, flattening image")
                    image_id = engine.flatten_container(container_id,
                                                        service_name, service)
                    logger.info(u'Saved flattened image for service',
                                service=service_name, image=image_id)
                else:
                    image_id = engine.commit_role_as_layer(
                        container_id, service_name,
                        fingerprint_hash.hexdigest(), service,
                        with_name=is_last_role)
                    logger.info(u'Committed layer as image',
                                service=service_name, image=image_id)
                engine.delete_container(container_id)
                cur_image_id = image_id
            # Tag the image also as latest:
            engine.tag_image_as_latest(service_name, cur_image_id)
            logger.info(u'Build complete.', service=service_name)
        else:
            logger.info(u'Service had no roles specified. Nothing to do.',
                        service=service_name)
    logger.info(u'All images successfully built.')
def hostcmd_build(base_path, project_name, engine_name, vars_files=None,
                  **kwargs):
    """Build the project's service images, (re)building the Conductor image
    first when needed, then delegate the real build to the Conductor."""
    # Conductor image caching is allowed only when both CLI cache flags agree.
    conductor_cache = kwargs['cache'] and kwargs['conductor_cache']
    config = get_config(base_path, vars_files=vars_files,
                        engine_name=engine_name, project_name=project_name)
    engine_obj = load_engine(['BUILD', 'RUN'], engine_name,
                             config.project_name, config['services'], **kwargs)
    conductor_container_id = engine_obj.get_container_id_for_service(
        'conductor')
    conductor_image_id = engine_obj.get_latest_image_id_for_service(
        'conductor')
    # Stop a running conductor before (re)building its image.
    if engine_obj.service_is_running('conductor'):
        engine_obj.stop_container(conductor_container_id, forcefully=True)
    # Rebuild the Conductor unless devel mode is on and an image exists.
    if conductor_image_id is None or not kwargs.get('devel'):
        #TODO once we get a conductor running, figure out how to know it's running
        if engine_obj.CAP_BUILD_CONDUCTOR:
            # Collect KEY=VALUE environment entries for the Conductor build
            # from settings.conductor.environment (accepts either a dict or
            # a pre-formatted list) plus any CLI-supplied variables.
            env_vars = []
            if config.get('settings', {}).get('conductor', {}).get('environment', {}):
                environment = config['settings']['conductor']['environment']
                if isinstance(environment, dict):
                    for key, value in iteritems(environment):
                        env_vars.append('{}={}'.format(key, value))
                else:
                    env_vars = environment
            if kwargs.get('with_variables'):
                env_vars += kwargs['with_variables']
            engine_obj.build_conductor_image(base_path, config.conductor_base,
                                             cache=conductor_cache,
                                             environment=env_vars)
        else:
            logger.warning(
                u'%s does not support building the Conductor image.',
                engine_obj.display_name, engine=engine_obj.display_name)
    # The old conductor container is no longer needed once the image exists.
    if conductor_container_id:
        engine_obj.delete_container(conductor_container_id)
    logger.debug('Config settings', config=config,
                 rawsettings=config.get('settings'), conf=type(config),
                 settings=config.get('settings', {}))
    save_container = config.save_conductor
    if kwargs.get('save_conductor_container'):
        # give precedence to CLI option
        save_container = True
    # Service-image layer caching likewise needs both cache flags set.
    kwargs['cache'] = kwargs['cache'] and kwargs['container_cache']
    kwargs['config_vars'] = config.get('defaults')
    engine_obj.await_conductor_command('build', dict(config), base_path,
                                       kwargs, save_container=save_container)
def conductorcmd_build(engine_name, project_name, services, cache=True,
                       python_interpreter=None, ansible_options='',
                       debug=False, **kwargs):
    """Build each requested service's image by applying its roles in layers.

    Variant that forwards an explicit ``python_interpreter`` to the role
    apply step and always mounts the conductor runtime read-only at
    ``/_usr``. Role variables come from ``service['defaults']``.
    """
    engine = load_engine(['BUILD'], engine_name, project_name, services,
                         **kwargs)
    logger.info(u'%s integration engine loaded. Build starting.',
                engine.display_name, project=project_name)
    # Default to building every service unless a subset was requested.
    services_to_build = kwargs.get('services_to_build') or services.keys()
    for service_name, service in services.items():
        if service_name not in services_to_build:
            logger.debug('Skipping service %s...', service_name)
            continue
        logger.info(u'Building service...', service=service_name,
                    project=project_name)
        cur_image_id = engine.get_image_id_by_tag(service['from'])
        # the fingerprint hash tracks cacheability
        fingerprint_hash = hashlib.sha256('%s::' % cur_image_id)
        logger.debug(u'Base fingerprint hash = %s',
                     fingerprint_hash.hexdigest(), service=service_name,
                     hash=fingerprint_hash.hexdigest())
        cache_busted = not cache
        # Remove any leftover build container for this service.
        cur_container_id = engine.get_container_id_for_service(service_name)
        if cur_container_id:
            if engine.service_is_running(service_name):
                engine.stop_container(cur_container_id, forcefully=True)
            engine.delete_container(cur_container_id)
        if service.get('roles'):
            for role in service['roles']:
                # Each role extends the fingerprint; a matching cached layer
                # lets us skip applying the role entirely.
                role_fingerprint = get_role_fingerprint(role)
                fingerprint_hash.update(role_fingerprint)
                if not cache_busted:
                    logger.debug(u'Still trying to keep cache.',
                                 service=service_name)
                    cached_image_id = engine.get_image_id_by_fingerprint(
                        fingerprint_hash.hexdigest())
                    if cached_image_id:
                        # We can reuse the cached image
                        logger.debug(u'Cached layer found for service',
                                     service=service_name,
                                     fingerprint=fingerprint_hash.hexdigest())
                        cur_image_id = cached_image_id
                        logger.info(u'Applied role %s from cache', role,
                                    service=service_name, role=role)
                        continue
                    else:
                        # Once a layer misses, all later layers must rebuild.
                        cache_busted = True
                        logger.debug(
                            u'Cache busted! No layer found',
                            service=service_name,
                            fingerprint=fingerprint_hash.hexdigest(),
                        )
                # Keep the container alive with a sleep loop; the conductor's
                # runtime is mounted read-only at /_usr and the environment
                # variables point the toolchain at it.
                container_id = engine.run_container(
                    cur_image_id,
                    service_name,
                    name=engine.container_name_for_service(service_name),
                    user='******',
                    working_dir='/',
                    command='sh -c "while true; do sleep 1; '
                            'done"',
                    entrypoint=[],
                    environment=dict(
                        LD_LIBRARY_PATH='/_usr/lib:/_usr/local/lib',
                        CPATH='/_usr/include:/_usr/local/include',
                        PATH='/usr/local/sbin:/usr/local/bin:'
                             '/usr/sbin:/usr/bin:/sbin:/bin:'
                             '/_usr/sbin:/_usr/bin:'
                             '/_usr/local/sbin:/_usr/local/bin',
                        PYTHONPATH='/_usr/lib/python2.7'),
                    volumes={
                        engine.get_runtime_volume_id(): {
                            'bind': '/_usr',
                            'mode': 'ro'
                        }
                    })
                # Busy-wait until the engine reports the container is up.
                while not engine.service_is_running(service_name):
                    time.sleep(0.2)
                logger.debug('Container running', id=container_id)
                rc = apply_role_to_container(
                    role, container_id, service_name, engine,
                    vars=dict(service['defaults']),
                    python_interpreter=python_interpreter,
                    ansible_options=ansible_options,
                    debug=debug)
                logger.debug('Playbook run finished.', exit_code=rc)
                if rc:
                    raise RuntimeError('Build failed.')
                logger.info(u'Applied role to service', service=service_name,
                            role=role)
                engine.stop_container(container_id, forcefully=True)
                is_last_role = role is service['roles'][-1]
                if is_last_role and kwargs.get('flatten'):
                    # Squash all layers into a single image for the service.
                    logger.debug("Finished build, flattening image")
                    image_id = engine.flatten_container(
                        container_id, service_name, service)
                    logger.info(u'Saved flattened image for service',
                                service=service_name, image=image_id)
                else:
                    image_id = engine.commit_role_as_layer(
                        container_id, service_name,
                        fingerprint_hash.hexdigest(), service,
                        with_name=is_last_role)
                    logger.info(u'Committed layer as image',
                                service=service_name, image=image_id)
                engine.delete_container(container_id)
                cur_image_id = image_id
            # Tag the image also as latest:
            engine.tag_image_as_latest(service_name, cur_image_id)
            logger.info(u'Build complete.', service=service_name)
        else:
            logger.info(u'Service had no roles specified. Nothing to do.',
                        service=service_name)
    logger.info(u'All images successfully built.')