def test_auto_client(self):
    client = docker.AutoVersionClient(**kwargs_from_env())
    client_version = client._version
    api_version = client.version(api_version=False)['ApiVersion']
    self.assertEqual(client_version, api_version)
    api_version_2 = client.version()['ApiVersion']
    self.assertEqual(client_version, api_version_2)
    client.close()
    with self.assertRaises(docker.errors.DockerException):
        docker.AutoVersionClient(version='1.11', **kwargs_from_env())
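# A minimal sketch (not part of the test above) of what AutoVersionClient does:
# in docker-py 1.x it is equivalent to constructing a plain Client with
# version='auto', which queries the daemon and adopts its API version.
import docker
from docker.utils import kwargs_from_env

client = docker.Client(version='auto', **kwargs_from_env())
print(client.version()['ApiVersion'])  # the negotiated server API version
client.close()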
def run_container_with_docker(image, command, name=None, environment={}):
    """Run a container with docker mounted in it."""
    cli = docker.AutoVersionClient(base_url=dd)
    volumes = ['/var/run/docker.sock']
    binds = {'/var/run/docker.sock': {'bind': '/var/run/docker.sock', 'ro': False}}
    try:
        abaco_conf_host_path = os.environ.get('abaco_conf_host_path')
        if not abaco_conf_host_path:
            abaco_conf_host_path = Config.get('spawner', 'abaco_conf_host_path')
        print("docker_utils using abaco_conf_host_path={}".format(abaco_conf_host_path))
        # if an abaco_conf_host_path was configured, mount that into the running container
        volumes.append('/abaco.conf')
        binds[abaco_conf_host_path] = {'bind': '/abaco.conf', 'ro': True}
    except configparser.NoOptionError:
        pass
    host_config = cli.create_host_config(binds=binds)
    container = cli.create_container(image=image,
                                     environment=environment,
                                     volumes=volumes,
                                     host_config=host_config,
                                     command=command)
    cli.start(container=container.get('Id'))
    return container
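# Hypothetical caller for run_container_with_docker() above; the command and
# environment are placeholders, and `dd` (the daemon base_url) is assumed to be
# defined at module scope, e.g. dd = 'unix://var/run/docker.sock'.
container = run_container_with_docker(image='jstubbs/abaco_core',
                                      command='python3 -u /actors/spawner.py',
                                      environment={'queue': 'default'})
print("started container: {}".format(container.get('Id')))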
def __init__(self, *args, **kwargs):
    self.options = kwargs.get('options', {})
    self.args = args
    self.kwargs = kwargs
    self.client = docker.AutoVersionClient()  # Client(base_url='unix://var/run/docker.sock')
    self.responses = []
def build_buildcontainer_image(base_path):
    assert_initialized(base_path)
    # To ensure version compatibility, we have to generate the kwargs ourselves
    client_kwargs = kwargs_from_env(assert_hostname=False)
    client = docker.AutoVersionClient(**client_kwargs)
    with make_temp_dir() as temp_dir:
        logger.info('Building Docker Engine context...')
        tarball_path = os.path.join(temp_dir, 'context.tar')
        tarball_file = open(tarball_path, 'wb')
        tarball = tarfile.TarFile(fileobj=tarball_file, mode='w')
        container_dir = os.path.normpath(os.path.join(base_path, 'ansible'))
        try:
            tarball.add(container_dir, arcname='ansible')
        except OSError:
            raise AnsibleContainerNotInitializedException()
        jinja_render_to_temp('ansible-dockerfile.j2', temp_dir, 'Dockerfile')
        tarball.add(os.path.join(temp_dir, 'Dockerfile'), arcname='Dockerfile')
        jinja_render_to_temp('hosts.j2', temp_dir, 'hosts',
                             hosts=extract_hosts_from_docker_compose(base_path))
        tarball.add(os.path.join(temp_dir, 'hosts'), arcname='hosts')
        tarball.close()
        tarball_file = open(tarball_path, 'rb')
        logger.info('Starting Docker build of Ansible Container image...')
        return [streamline for streamline in client.build(fileobj=tarball_file,
                                                          rm=True,
                                                          custom_context=True,
                                                          pull=True,
                                                          forcerm=True,
                                                          tag='ansible-container-builder')]
def pull_image(image):
    """
    Update the local registry with an actor's image.
    :param image: the image to pull
    :return:
    """
    logger.debug("top of pull_image()")
    cli = docker.AutoVersionClient(base_url=dd)
    try:
        rsp = cli.pull(repository=image)
    except Exception as e:
        msg = "Error pulling image {} - exception: {} ".format(image, e)
        logger.info(msg)
        raise DockerError(msg)
    if '"message":"Error' in rsp:
        if '{} not found'.format(image) in rsp:
            msg = "Image {} was not found on the public registry.".format(image)
            logger.info(msg)
            raise DockerError(msg)
        else:
            msg = "There was an error pulling the image: {}".format(rsp)
            logger.error(msg)
            raise DockerError(msg)
    return rsp
def get_output_from_container(image, command):
    """Run 'command' in a shell in a container based on 'image'.

    Capture its output by redirecting STDOUT to a mounted file and
    return the contents of that file.
    """
    logger.info("Running '%s' in image '%s'", command, image)
    cli = docker.AutoVersionClient(base_url=containerdiff.docker_socket)
    volume_dir = tempfile.mkdtemp(dir="/tmp")
    logger.debug("Container output volume: %s", volume_dir)
    container = cli.create_container(
        image,
        volumes=[volume_dir],
        host_config=cli.create_host_config(
            binds=[volume_dir + ":/mnt/containerdiff-volume:Z"]),
        command="/bin/sh -c 'set -m; "
                "touch /mnt/containerdiff-volume/output; "
                "chmod a+rw /mnt/containerdiff-volume/output; "
                "exec 1>/mnt/containerdiff-volume/output; " + command + "'",
        user=os.geteuid())
    cli.start(container)
    error = cli.logs(container)
    if error != b'':
        logger.error(error)
    cli.stop(container)
    cli.remove_container(container)
    output = open(os.path.join(volume_dir, "output")).read()
    shutil.rmtree(volume_dir, ignore_errors=True)
    return output
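# Illustrative use of get_output_from_container(); the image and command are
# placeholders. The returned value is the raw text the command wrote to stdout,
# so callers can split it into lines themselves.
output = get_output_from_container('fedora:latest', 'rpm -qa')
for line in output.splitlines():
    print(line)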
def get_client(self):
    if not self._client:
        # To ensure version compatibility, we have to generate the kwargs ourselves
        client_kwargs = kwargs_from_env(assert_hostname=False)
        self._client = docker.AutoVersionClient(**client_kwargs)
        self.api_version = self._client.version()['ApiVersion']
    return self._client
def image_by_name(img_name, images=None):
    """
    Returns a list of image data for images which match img_name. Will
    optionally take a list of images from a docker.Client.images
    query to avoid multiple docker queries.
    """
    i_reg, i_rep, i_tag = _decompose(img_name)
    # Correct for bash-style matching expressions.
    if not i_reg:
        i_reg = '*'
    if not i_tag:
        i_tag = '*'
    # If the images were not passed in, go get them.
    if images is None:
        c = docker.AutoVersionClient(**kwargs_from_env())
        images = c.images(all=False)
    valid_images = []
    for i in images:
        for t in i['RepoTags']:
            reg, rep, tag = _decompose(t)
            if matches(reg, i_reg) \
                    and matches(rep, i_rep) \
                    and matches(tag, i_tag):
                valid_images.append(i)
                break
            # Some repos after decompose end up with the img_name
            # at the end, i.e. rhel7/rsyslog
            if rep.endswith(img_name):
                valid_images.append(i)
                break
    return valid_images
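# Example call (not from the original module) using the bash-style wildcard
# matching that image_by_name() supports; the repository name is a placeholder.
for img in image_by_name('rhel7/rsyslog:*'):
    print(img['Id'][:12], img['RepoTags'])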
def test_push(tmpdir):
    """ this is an integration test which should be run against real pulp """
    client = docker.AutoVersionClient()
    try:
        client.inspect_image("busybox:latest")
    except APIError:
        client.pull("busybox", tag="latest")
    image = client.get_image("busybox:latest")
    image_tar_path = os.path.join(str(tmpdir), "busybox.tar")
    # the image tarball is binary data, so open the file in binary mode
    image_file = open(image_tar_path, "wb")
    image_file.write(image.data)
    image_file.close()
    registry_name = os.environ.get("PULP_INSTANCE", None) or "dev"
    secret_path = os.path.expanduser("~/.pulp/")
    image_names = [ImageName.parse("test/busybox-test")]
    workflow = DockerBuildWorkflow(SOURCE, "test/busybox-test")
    uploader = PulpUploader(workflow, registry_name, image_tar_path, logger,
                            pulp_secret_path=secret_path)
    uploader.push_tarball_to_pulp(image_names)
def __init__(self, *args, **kwargs):
    self.options = kwargs.get('options', {})
    self.args = args
    self.kwargs = kwargs
    self.client = docker.AutoVersionClient()
    self.responses = []
    self._errors = None
def get_client(self):
    if not self._client:
        # To ensure version compatibility, we have to generate the kwargs ourselves
        client_kwargs = kwargs_from_env(assert_hostname=False)
        self._client = docker.AutoVersionClient(**client_kwargs)
        self.api_version = self._client.version()['ApiVersion']
        # Set the version in the env so it can be used elsewhere
        os.environ['DOCKER_API_VERSION'] = self.api_version
    return self._client
def get_docker_client():
    """
    Universal method to create a docker client.
    """
    try:
        return docker.AutoVersionClient(**kwargs_from_env())
    except docker.errors.DockerException:
        return docker.Client(**kwargs_from_env())
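# Sketch of the fallback behavior: if version negotiation fails
# (AutoVersionClient raises docker.errors.DockerException), a plain Client
# pinned to the library's default API version is returned instead.
cli = get_docker_client()
print(type(cli).__name__)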
def main():
    context = get_context()
    print("Contents of context:")
    for k, v in context.items():
        print("key: {}. value: {}".format(k, v))
    print("Contents of env: {}".format(os.environ))
    cli = docker.AutoVersionClient(base_url='unix://var/run/docker.sock')
    print("Containers: {}".format(cli.containers()))
def __init__(self):
    self._containers = None
    self._images = None
    kwargs = docker.utils.kwargs_from_env(assert_hostname=False)
    try:
        self.client = docker.AutoVersionClient(**kwargs)
    except docker.errors.DockerException as ex:
        raise TerminateApplication(
            "can't establish connection to docker daemon: {0}".format(str(ex)))
def loop(backend, docker_url):
    """
    :param backend:
    :param docker_url:
    :return:
    """
    _logger.w('start and supervise event loop.')
    client = docker.AutoVersionClient(base_url=docker_url)
    supervisor.supervise(min_seconds=2, max_seconds=64)(_heartbeat)(backend, client)
def __init__(self):
    self._containers = None
    self._images = None  # displayed images
    self._all_images = None  # docker images -a
    kwargs = docker.utils.kwargs_from_env(assert_hostname=False)
    # kwargs["timeout"] = 1  # when debugging timeouts
    try:
        self.client = docker.AutoVersionClient(**kwargs)
    except docker.errors.DockerException as ex:
        raise TerminateApplication(
            "can't establish connection to docker daemon: {0}".format(str(ex)))
    self.scratch_image = RootImage(self)
def rm_container(cid):
    """
    Remove a container.
    :param cid:
    :return:
    """
    cli = docker.AutoVersionClient(base_url=dd)
    try:
        rsp = cli.remove_container(cid, force=True)
    except Exception as e:
        raise DockerError("Error removing container {}, exception: {}".format(
            cid, str(e)))
def cmdrun_push(base_path, username=None, password=None, email=None, url=None, **kwargs):
    assert_initialized(base_path)
    # To ensure version compatibility, we have to generate the kwargs ourselves
    client_kwargs = kwargs_from_env(assert_hostname=False)
    client = docker.AutoVersionClient(**client_kwargs)
    if not url:
        url = DEFAULT_DOCKER_REGISTRY_URL
    if username and email:
        # We assume if no username was given, the docker config file suffices
        while not password:
            password = getpass.getpass(u'Enter password for %s at %s: ' % (username, url))
        client.login(username=username, password=password, email=email, registry=url)
    username = get_current_logged_in_user(url)
    if not username:
        raise AnsibleContainerNoAuthenticationProvided(u'Please provide login '
                                                       u'credentials for this registry.')
    logger.info('Pushing to repository for user %s', username)
    builder_img_id = client.images(name='ansible-container-builder', quiet=True)[0]
    project_name = os.path.basename(base_path).lower()
    for host in extract_hosts_touched_by_playbook(base_path, builder_img_id):
        image_id, image_buildstamp = get_latest_image_for(project_name, host, client)
        client.tag(image_id,
                   '%s/%s-%s' % (username, project_name, host),
                   tag=image_buildstamp)
        logger.info('Pushing %s-%s:%s...', project_name, host, image_buildstamp)
        status = client.push('%s/%s-%s' % (username, project_name, host),
                             tag=image_buildstamp,
                             stream=True)
        last_status = None
        for line in status:
            line = json.loads(line)
            if type(line) is dict and 'error' in line:
                logger.error(line['error'])
            elif type(line) is dict and 'status' in line:
                if line['status'] != last_status:
                    logger.info(line['status'])
                last_status = line['status']
            else:
                logger.debug(line)
    logger.info('Done!')
def container_running(image=None, name=None):
    """Check if there is a running container for an image.

    image should be fully qualified; e.g. image='jstubbs/abaco_core'
    Can pass wildcards in name using * character; e.g. name='abaco_spawner*'
    """
    filters = {}
    if name:
        filters['name'] = name
    if image:
        filters['image'] = image
    cli = docker.AutoVersionClient(base_url=dd)
    containers = cli.containers(filters=filters)
    return len(containers) > 0
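# Illustrative check (not in the original module) using the wildcard name
# matching described in the docstring; the name pattern is a placeholder.
if container_running(name='abaco_spawner*'):
    print('a spawner container is already running')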
def rm_container(cid):
    """
    Remove a container.
    :param cid:
    :return:
    """
    cli = docker.AutoVersionClient(base_url=dd)
    try:
        rsp = cli.remove_container(cid, force=True)
    except Exception as e:
        logger.info("Got exception trying to remove container: {}. Exception: {}".format(cid, e))
        raise DockerError("Error removing container {}, exception: {}".format(cid, str(e)))
    logger.info("container {} removed.".format(cid))
def run(image1, image2):
    """Test metadata of the image.

    Adds one key to the output of the diff tool:
    "metadata" - unified_diff style changes in metadata
    (see output of "test_metadata" function in this module)
    """
    ID1, metadata1, output_dir1 = image1
    ID2, metadata2, output_dir2 = image2
    logger.info("Testing metadata of the image.")
    cli = docker.AutoVersionClient(base_url=containerdiff.docker_socket)
    inspect_metadata1 = cli.inspect_image(ID1)
    inspect_metadata2 = cli.inspect_image(ID2)
    diff = test_metadata(ID1, ID2, inspect_metadata1, inspect_metadata2)
    return {"metadata": diff}
def docker_client(log):
    log.debug("Preparing Docker client...")
    # Default timeout 10 minutes
    try:
        timeout = int(os.getenv('DOCKER_TIMEOUT', 600))
    except ValueError as e:
        raise Error("Provided timeout value: %s cannot be parsed as integer, exiting."
                    % os.getenv('DOCKER_TIMEOUT'))
    if not timeout > 0:
        raise Error("Provided timeout value needs to be greater than zero, currently: %s, exiting."
                    % timeout)
    # backwards compat
    try:
        os.environ["DOCKER_HOST"] = os.environ["DOCKER_CONNECTION"]
        log.warn("DOCKER_CONNECTION is deprecated, please use DOCKER_HOST instead")
    except KeyError:
        pass
    params = docker.utils.kwargs_from_env()
    params["timeout"] = timeout
    try:
        client = docker.AutoVersionClient(**params)
    except docker.errors.DockerException as e:
        log.error("Could not create Docker client, please make sure that you specified "
                  "valid parameters in the 'DOCKER_HOST' environment variable.")
        raise Error("Error while creating the Docker client: %s" % e)
    if client and valid_docker_connection(client):
        log.debug("Docker client ready")
        return client
    else:
        log.error("Could not connect to the Docker daemon, please make sure the "
                  "Docker daemon is running.")
        if os.environ.get('DOCKER_HOST'):
            log.error("If Docker daemon is running, please make sure that you specified "
                      "valid parameters in the 'DOCKER_HOST' environment variable.")
        raise Error("Cannot connect to Docker daemon")
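# Assumed usage of docker_client() above; `log` can be any logging.Logger and
# DOCKER_TIMEOUT, when set, must be a positive integer number of seconds
# (it defaults to 600, i.e. 10 minutes).
import logging
import os

log = logging.getLogger('builder')
os.environ.setdefault('DOCKER_TIMEOUT', '120')
client = docker_client(log)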
def pull_image(image):
    """
    Update the local registry with an actor's image.
    :param image: the image to pull
    :return:
    """
    cli = docker.AutoVersionClient(base_url=dd)
    try:
        rsp = cli.pull(repository=image)
    except Exception as e:
        raise DockerError("Error pulling image {} - exception: {} ".format(image, str(e)))
    if '"message":"Error' in rsp:
        if '{} not found'.format(image) in rsp:
            raise DockerError("Image {} was not found on the public registry.".format(image))
        else:
            raise DockerError("There was an error pulling the image: {}".format(rsp))
    return rsp
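# Hedged example of calling pull_image(); the image name is a placeholder.
# Both transport failures and "not found" responses surface as DockerError,
# which is assumed to be defined elsewhere in this module.
try:
    pull_image('jstubbs/abaco_core')
except DockerError as e:
    print("pull failed: {}".format(e))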
def docker_client():
    # Default timeout 10 minutes
    try:
        timeout = int(os.getenv('DOCKER_TIMEOUT', 600))
    except ValueError as e:
        print("Provided timeout value: %s cannot be parsed as integer, exiting."
              % os.getenv('DOCKER_TIMEOUT'))
        sys.exit(1)
    if not timeout > 0:
        print("Provided timeout value needs to be greater than zero, currently: %s, exiting."
              % timeout)
        sys.exit(1)
    # Default base url for the connection
    base_url = os.getenv('DOCKER_CONNECTION', 'unix://var/run/docker.sock')
    try:
        client = docker.AutoVersionClient(base_url=base_url, timeout=timeout)
    except docker.errors.DockerException as e:
        print("Error while creating the Docker client: %s" % e)
        print("Please make sure that you specified valid parameters in the "
              "'DOCKER_CONNECTION' environment variable.")
        sys.exit(1)
    if client and valid_docker_connection(client):
        return client
    else:
        print("Could not connect to the Docker daemon, please make sure the "
              "Docker daemon is running.")
        if os.environ.get('DOCKER_CONNECTION'):
            print("If Docker daemon is running, please make sure that you specified "
                  "valid parameters in the 'DOCKER_CONNECTION' environment variable.")
        sys.exit(1)
def run(image1, image2):
    """Test history of the image.

    Adds one key to the output of the diff tool:
    "history" - unified_diff style changes in commands used to create the image
    """
    ID1, metadata1, output_dir1 = image1
    ID2, metadata2, output_dir2 = image2
    logger.info("Testing history of the image")
    cli = docker.AutoVersionClient(base_url=containerdiff.docker_socket)
    history1 = dockerfile_from_image(ID1, cli)
    history2 = dockerfile_from_image(ID2, cli)
    # Do unified_diff of commands
    diff = [item for item in difflib.unified_diff(history1, history2, n=0)
            if not item.startswith(("+++", "---", "@@"))]
    return {"history": diff}
def container_running(image=None, name=None):
    """Check if there is a running container for an image.

    image should be fully qualified; e.g. image='jstubbs/abaco_core'
    Can pass wildcards in name using * character; e.g. name='abaco_spawner*'
    """
    logger.debug("top of container_running().")
    filters = {}
    if name:
        filters['name'] = name
    if image:
        filters['image'] = image
    cli = docker.AutoVersionClient(base_url=dd)
    try:
        containers = cli.containers(filters=filters)
    except Exception as e:
        msg = "There was an error checking container_running for image: {}. Exception: {}".format(
            image, e)
        logger.error(msg)
        raise DockerError(msg)
    logger.debug("found containers: {}".format(containers))
    return len(containers) > 0
def create_dclient():
    # return docker.Client()
    return docker.AutoVersionClient()
def before_all(context):
    """Perform the setup before the first event."""
    context.config.setup_logging()
    context.start_system = _start_system
    context.teardown_system = _teardown_system
    context.restart_system = _restart_system
    context.run_command_in_service = _run_command_in_service
    context.exec_command_in_container = _exec_command_in_container
    context.is_running = _is_running
    context.is_jobs_debug_api_running = _is_jobs_debug_api_running
    context.is_component_search_service_running = _is_component_search_service_running
    context.is_master_tag_list_service_running = _is_master_tag_list_service_running
    context.wait_for_master_tag_list_service = _wait_for_master_tag_list_service
    context.is_get_untagged_component_service_running = _is_get_untagged_component_service_running
    context.wait_for_get_untagged_component_service = _wait_for_get_untagged_component_service
    context.send_json_file = _send_json_file
    context.wait_for_jobs_debug_api_service = _wait_for_jobs_debug_api_service
    context.wait_for_component_search_service = _wait_for_component_search_service
    context.is_3scale_staging_running = _is_3scale_staging_running
    context.is_backbone_api_running = _is_backbone_api_running
    context.is_gemini_api_running = _is_gemini_api_running

    # Configure container logging
    context.dump_logs = _read_boolean_setting(context, 'dump_logs')
    tail_logs = int(context.config.userdata.get('tail_logs', 0))
    dump_errors = _read_boolean_setting(context, 'dump_errors')
    if tail_logs:
        dump_errors = True
    else:
        tail_logs = 50
    context.dump_errors = dump_errors
    context.tail_logs = tail_logs

    # Configure system under test
    context.kubernetes_dir_path = context.config.userdata.get('kubernetes_dir', None)
    if context.kubernetes_dir_path is not None:
        context.docker_compose_path = None
    else:
        # If we're not running Kubernetes, use the local Docker Compose setup
        _set_default_compose_path(context)

    # for now, we just assume we know what compose file looks like (what services need what images)
    context.images = {}
    context.images['bayesian/bayesian-api'] = context.config.userdata.get(
        'coreapi_server_image',
        'registry.devshift.net/bayesian/bayesian-api')
    context.images['bayesian/cucos-worker'] = context.config.userdata.get(
        'coreapi_worker_image',
        'registry.devshift.net/bayesian/cucos-worker')

    coreapi_url = _read_url_from_env_var('F8A_API_URL')
    jobs_api_url = _read_url_from_env_var('F8A_JOB_API_URL')
    gremlin_url = _read_url_from_env_var('F8A_GREMLIN_URL')
    threescale_url = _read_url_from_env_var('F8A_3SCALE_URL')
    backbone_api_url = _read_url_from_env_var('F8A_BACKBONE_API_URL')
    service_id = _read_url_from_env_var('F8A_SERVICE_ID')
    gemini_api_url = _read_url_from_env_var('F8A_GEMINI_API_URL')
    license_service_url = _read_url_from_env_var('F8A_LICENSE_SERVICE_URL')

    context.running_locally = _running_locally(coreapi_url, jobs_api_url)
    check_test_environment(context, coreapi_url)

    context.coreapi_url = _get_url(context, coreapi_url, 'coreapi_url',
                                   _FABRIC8_ANALYTICS_SERVER)
    context.jobs_api_url = _get_url(context, jobs_api_url, 'jobs_api_url',
                                    _FABRIC8_ANALYTICS_JOBS)
    context.gremlin_url = _get_url(context, gremlin_url, "gremlin_url",
                                   _FABRIC8_GREMLIN_SERVICE)
    context.license_service_url = _get_url(context, license_service_url, 'license_service_url',
                                           _FABRIC8_LICENSE_SERVICE)
    context.threescale_url = threescale_url
    context.backbone_api_url = backbone_api_url
    context.service_id = service_id
    context.gemini_api_url = gemini_api_url

    # we can retrieve access token by using refresh/offline token
    context.access_token = retrieve_access_token(os.environ.get("RECOMMENDER_REFRESH_TOKEN"),
                                                 os.environ.get("OSIO_AUTH_SERVICE"))

    # information needed to access S3 database from tests
    _check_env_var_presence_s3_db('AWS_ACCESS_KEY_ID')
    _check_env_var_presence_s3_db('AWS_SECRET_ACCESS_KEY')
    _check_env_var_presence_s3_db('S3_REGION_NAME')

    aws_access_key_id = os.environ.get('AWS_ACCESS_KEY_ID')
    aws_secret_access_key = os.environ.get('AWS_SECRET_ACCESS_KEY')
    s3_region_name = os.environ.get('S3_REGION_NAME')
    deployment_prefix = os.environ.get('DEPLOYMENT_PREFIX', 'STAGE')

    context.s3interface = S3Interface(aws_access_key_id, aws_secret_access_key,
                                      s3_region_name, deployment_prefix)

    context.client = None

    # timeout values can be overwritten by environment variables
    stack_analysis_timeout = _parse_int_env_var('F8A_STACK_ANALYSIS_TIMEOUT')
    component_analysis_timeout = _parse_int_env_var('F8A_COMPONENT_ANALYSIS_TIMEOUT')

    context.stack_analysis_timeout = stack_analysis_timeout or _DEFAULT_STACK_ANALYSIS_TIMEOUT
    context.component_analysis_timeout = component_analysis_timeout \
        or _DEFAULT_COMPONENT_ANALYSIS_TIMEOUT

    if context.running_locally:
        context.client = docker.AutoVersionClient()
        for desired, actual in context.images.items():
            desired = 'registry.devshift.net/' + desired
            if desired != actual:
                context.client.tag(actual, desired, force=True)

    # Specify the analyses checked for when looking for "complete" results
    def _get_expected_component_analyses(ecosystem):
        common = context.EXPECTED_COMPONENT_ANALYSES
        specific = context.ECOSYSTEM_DEPENDENT_ANALYSES.get(ecosystem, set())
        return common | specific

    context.get_expected_component_analyses = _get_expected_component_analyses

    def _compare_analysis_sets(actual, expected):
        unreliable = context.UNRELIABLE_ANALYSES
        missing = expected - actual - unreliable
        unexpected = actual - expected - unreliable
        return missing, unexpected

    context.compare_analysis_sets = _compare_analysis_sets

    context.EXPECTED_COMPONENT_ANALYSES = {
        'metadata', 'source_licenses', 'digests', 'dependency_snapshot', 'code_metrics'
        # The following workers are currently disabled by default:
        # 'static_analysis', 'binary_data', 'languages', 'crypto_algorithms'
    }
    # Analyses that are only executed for particular language ecosystems
    context.ECOSYSTEM_DEPENDENT_ANALYSES = dict()
    # Results that use a nonstandard format, so we don't check for the
    # standard "status", "summary", and "details" keys
    context.NONSTANDARD_ANALYSIS_FORMATS = set()
    # Analyses that are just plain unreliable and so need to be excluded from
    # consideration when determining whether or not an analysis is complete
    context.UNRELIABLE_ANALYSES = {
        'github_details',  # if no github api token provided
        'security_issues'  # needs Snyk vulndb in S3
    }
def execute_actor(actor_id, worker_id, worker_ch, image, msg, d={}, privileged=False):
    """
    Creates and runs an actor container and supervises the execution, collecting
    statistics about resource consumption from the Docker daemon.

    :param actor_id: the dbid of the actor; for updating worker status
    :param worker_id: the worker id; also for updating worker status
    :param worker_ch: NO LONGER USED.
    :param image: the actor's image; worker must have already downloaded this image
        to the local docker registry.
    :param msg: the message being passed to the actor.
    :param d: dictionary representing the environment to instantiate within the
        actor container.
    :param privileged: whether this actor is "privileged"; i.e., its container
        should run in privileged mode with the docker daemon mounted.
    :return: result (dict), logs (str) - `result`: statistics about resource
        consumption; `logs`: output from docker logs.
    """
    result = {'cpu': 0, 'io': 0, 'runtime': 0}
    cli = docker.AutoVersionClient(base_url=dd)
    d['MSG'] = msg
    binds = {}
    volumes = []
    # if container is privileged, mount the docker daemon so that additional
    # containers can be started.
    if privileged:
        binds = {'/var/run/docker.sock': {'bind': '/var/run/docker.sock', 'ro': False}}
        volumes = ['/var/run/docker.sock']
    host_config = cli.create_host_config(binds=binds, privileged=privileged)
    container = cli.create_container(image=image,
                                     environment=d,
                                     volumes=volumes,
                                     host_config=host_config)
    try:
        cli.start(container=container.get('Id'))
    except Exception as e:
        # if there was an error starting the container, user will need to debug
        raise DockerStartContainerError("Could not start container {}. Exception {}".format(
            container.get('Id'), str(e)))
    start = timeit.default_timer()
    Worker.update_worker_status(actor_id, worker_id, BUSY)
    running = True
    # create a separate cli for checking stats objects since these should be fast
    # and we don't want to wait
    stats_cli = docker.AutoVersionClient(base_url=dd, timeout=1)
    try:
        stats_obj = stats_cli.stats(container=container.get('Id'), decode=True)
    except ReadTimeout:
        # if the container execution is so fast that the initial stats object
        # cannot be created, we skip the running loop and return a minimal stats object
        result['cpu'] = 1
        result['runtime'] = 1
        return result
    while running:
        try:
            print("waiting on a stats obj: {}".format(timeit.default_timer()))
            stats = next(stats_obj)
        except ReadTimeoutError:
            print("next(stats) just timed out: {}".format(timeit.default_timer()))
            # container stopped before another stats record could be read,
            # just ignore and move on
            running = False
            break
        try:
            result['cpu'] += stats['cpu_stats']['cpu_usage']['total_usage']
            result['io'] += stats['network']['rx_bytes']
        except KeyError:
            # as of docker 1.9, the stats object returns bytes that must be decoded
            # and the network key is now 'networks' with multiple subkeys.
            print("got a stats obj: {}".format(timeit.default_timer()))
            if type(stats) == bytes:
                stats = json.loads(stats.decode("utf-8"))
            result['cpu'] += stats['cpu_stats']['cpu_usage']['total_usage']
            # even running docker 1.9, there seems to be a race condition where the
            # 'networks' key doesn't always get populated.
            try:
                result['io'] += stats['networks']['eth0']['rx_bytes']
            except KeyError:
                pass
            print("Recorded a stats obj: {}".format(timeit.default_timer()))
        if running:
            try:
                print("waiting on cli.wait: {}".format(timeit.default_timer()))
                cli.wait(container=container.get('Id'), timeout=1)
                print("container finished: {}".format(timeit.default_timer()))
                running = False
            except ReadTimeout:
                print("cli.wait just timed out: {}".format(timeit.default_timer()))
                # the wait timed out so check if we are beyond the max_run_time
                # (max_run_time is assumed to be configured at module scope)
                runtime = timeit.default_timer() - start
                if max_run_time > 0 and max_run_time < runtime:
                    print("hit runtime limit: {}".format(timeit.default_timer()))
                    cli.stop(container.get('Id'))
                    running = False
                    print("container stopped: {}".format(timeit.default_timer()))
    stop = timeit.default_timer()
    # get info from container execution, including exit code
    try:
        container_info = cli.inspect_container(container.get('Id'))
        try:
            container_state = container_info['State']
            try:
                exit_code = container_state['ExitCode']
            except KeyError:
                print("Could not determine ExitCode for container {} in ".format(
                    container.get('Id')))
                exit_code = 'undetermined'
        except KeyError:
            print("Could not determine final state for container {} in ".format(
                container.get('Id')))
            container_state = {'unavailable': True}
    except docker.errors.APIError as e:
        print("Could not inspect container {}".format(container.get('Id')))
        # fall back to defaults so the return statement below doesn't raise NameError
        container_state = {'unavailable': True}
        exit_code = 'undetermined'
    # get logs from container
    logs = cli.logs(container.get('Id'))
    # remove container, ignore errors
    try:
        cli.remove_container(container=container)
        print("Container removed.")
    except Exception as e:
        print("Exception trying to remove actor: {}".format(e))
    result['runtime'] = int(stop - start)
    return result, logs, container_state, exit_code
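# Hedged helper sketch distilled from the loop above: Docker >= 1.9 can yield
# stats records as raw bytes, so a record may need decoding before its
# 'cpu_stats'/'networks' keys can be read. This helper is not part of the
# original module.
import json

def normalize_stats_record(stats):
    if isinstance(stats, bytes):
        stats = json.loads(stats.decode('utf-8'))
    return stats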
def run_container_with_docker(image, command, name=None, environment={}, log_file='service.log'):
    """
    Run a container with docker mounted in it.
    Note: this function always mounts the abaco conf file so it should not be
    used by execute_actor().
    """
    logger.debug("top of run_container_with_docker().")
    cli = docker.AutoVersionClient(base_url=dd)
    # bind the docker socket as r/w since this container gets docker.
    volumes = ['/var/run/docker.sock']
    binds = {'/var/run/docker.sock': {'bind': '/var/run/docker.sock', 'ro': False}}
    # mount the abaco conf file. first we look for the environment variable,
    # falling back to the value in Config.
    try:
        abaco_conf_host_path = os.environ.get('abaco_conf_host_path')
        if not abaco_conf_host_path:
            abaco_conf_host_path = Config.get('spawner', 'abaco_conf_host_path')
        logger.debug("docker_utils using abaco_conf_host_path={}".format(abaco_conf_host_path))
        # mount config file at the root of the container as r/o
        volumes.append('/service.conf')
        binds[abaco_conf_host_path] = {'bind': '/service.conf', 'ro': True}
    except configparser.NoOptionError as e:
        # if we're here, it's bad: we don't have a config file. Better to cut and run.
        msg = "Did not find the abaco_conf_host_path in Config. Exception: {}".format(e)
        logger.error(msg)
        raise DockerError(msg)
    # mount the logs file.
    volumes.append('/var/log/service.log')
    # first check to see if the logs directory config was set:
    try:
        logs_host_dir = Config.get('logs', 'host_dir')
    except (configparser.NoSectionError, configparser.NoOptionError):
        # if the directory is not configured, default it to abaco_conf_host_path
        logs_host_dir = os.path.dirname(abaco_conf_host_path)
    binds['{}/{}'.format(logs_host_dir, log_file)] = {'bind': '/var/log/service.log', 'rw': True}
    host_config = cli.create_host_config(binds=binds)
    logger.debug("binds: {}".format(binds))
    # create and start the container
    try:
        container = cli.create_container(image=image,
                                         environment=environment,
                                         volumes=volumes,
                                         host_config=host_config,
                                         command=command)
        cli.start(container=container.get('Id'))
    except Exception as e:
        msg = "Got exception trying to run container from image: {}. Exception: {}".format(image, e)
        logger.info(msg)
        raise DockerError(msg)
    logger.info("container started successfully: {}".format(container))
    return container