def _get_client(self):
    url = self.hostname
    if 'unix' not in url:
        url = '{0}:{1}'.format(self.hostname, self.port)
        if not url.startswith('http'):
            url = 'http://{0}'.format(url)
    return client.Client(base_url=url)
def get_containers(self, show_all=False):
    c = client.Client(
        base_url='http://{0}:{1}'.format(self.hostname, self.port))
    key = self._generate_container_cache_key(show_all)
    containers = cache.get(key)
    container_ids = []
    if containers is None:
        try:
            containers = c.containers(all=show_all)
        except requests.ConnectionError:
            containers = []
        # update meta data
        for x in containers:
            # only get first 12 chars of id (for metadata)
            c_id = utils.get_short_id(x.get('Id'))
            # ignore stopped containers
            meta = c.inspect_container(c_id)
            m, created = Container.objects.get_or_create(container_id=c_id,
                                                         host=self)
            m.is_running = meta.get('State', {}).get('Running', False)
            m.meta = json.dumps(meta)
            m.save()
            container_ids.append(c_id)
        # set extra containers to not running
        Container.objects.filter(host=self).exclude(
            container_id__in=container_ids).update(is_running=False)
        cache.set(key, containers, HOST_CACHE_TTL)
    return containers
def get_pod_info(self, container_id, pod_uuid=None):
    ''' Get UUID and PID for POD using "docker inspect" equivalent API '''
    from docker import client
    os.environ['DOCKER_API_VERSION'] = '1.22'
    try:
        docker_client = client.Client()
        if docker_client is None:
            raise ParamsError(PARAMS_ERR_DOCKER_CONNECTION,
                              'Error creating docker client')
        container = docker_client.inspect_container(container_id)
        self.pod_pid = container['State']['Pid']
        self.pod_uuid = \
            container['Config']['Labels']['io.kubernetes.pod.uid']
    except Exception:
        # Don't report exception if pod_uuid set from argument already
        # pod-uuid will be specified in argument in case of UT
        if self.pod_uuid is None:
            raise ParamsError(PARAMS_ERR_GET_UUID,
                              'Error finding UUID for pod ' + container_id)
    if self.pod_pid is None:
        raise ParamsError(PARAMS_ERR_GET_PID,
                          'Error finding PID for pod ' + container_id)
    return
def setup(self):
    docker_cfg = self.container.config.get('DOCKER')
    if docker_cfg:
        self.client = client.Client(**docker_cfg)
    else:
        self.client = client.from_env()
    self.client.info()
    self.event_handlers = []
def get_images(self, show_all=False):
    c = client.Client(
        base_url='http://{0}:{1}'.format(self.hostname, self.port))
    key = IMAGE_KEY.format(self.name)
    images = cache.get(key)
    if images is None:
        images = c.images(all=show_all)
        cache.set(key, images, HOST_CACHE_TTL)
    return images
def docker_client():
    try:
        yield client.Client(
            base_url='unix:///var/run/docker.sock',
            version='1.22',
            timeout=300,
        )
    except errors.APIError as e:
        raise
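# A minimal usage sketch for the generator above, assuming the caller wraps it
# with contextlib.contextmanager (that decorator is not shown in the snippet
# and is an assumption here); Client.version() is part of the old docker-py
# low-level API and returns a dict describing the daemon.
from contextlib import contextmanager

docker_client_cm = contextmanager(docker_client)

with docker_client_cm() as cli:
    print(cli.version())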
def _thd_start_instance(self, image, volumes):
    docker_client = client.Client(**self.client_args)
    found = False
    if image is not None:
        found = self._image_exists(docker_client, image)
    else:
        image = '%s_%s_image' % (self.workername, id(self))
    if (not found) and (self.dockerfile is not None):
        log.msg("Image '%s' not found, building it from scratch" % image)
        for line in docker_client.build(
                fileobj=BytesIO(self.dockerfile.encode('utf-8')),
                tag=image):
            for streamline in _handle_stream_line(line):
                log.msg(streamline)

    if not self._image_exists(docker_client, image):
        log.msg("Image '%s' not found" % image)
        raise LatentWorkerFailedToSubstantiate(
            'Image "%s" not found on docker host.' % image
        )

    self.parse_volumes(volumes)
    self.hostconfig['binds'] = self.binds
    host_conf = docker_client.create_host_config(**self.hostconfig)

    instance = docker_client.create_container(
        image,
        self.command,
        name='%s_%s' % (self.workername, id(self)),
        volumes=self.volumes,
        environment=self.createEnvironment(),
        host_config=host_conf
    )

    if instance.get('Id') is None:
        log.msg('Failed to create the container')
        raise LatentWorkerFailedToSubstantiate(
            'Failed to start container'
        )
    shortid = instance['Id'][:6]
    log.msg('Container created, Id: %s...' % (shortid,))
    instance['image'] = image
    self.instance = instance
    docker_client.start(instance)
    log.msg('Container started')
    if self.followStartupLogs:
        logs = docker_client.attach(
            container=instance, stdout=True, stderr=True, stream=True)
        for line in logs:
            log.msg("docker VM %s: %s" % (shortid, line.strip()))
            if self.conn:
                break
        del logs
    return [instance['Id'], image]
def setUpClass(cls):
    """
    Run the test containers in parallel at setup, so that the actual test
    methods are just checking the output. If timeout is set and killing
    container on timeout is enabled, then kill the containers, otherwise
    just log the timeout and keep waiting.
    """
    super(ContainerTestContainer, cls).setUpClass()
    cls.cli = client.Client(base_url=conf['docker_url'])
    task_queue = conf['tasks'].keys()
    running_tasks = []
    while len(task_queue) > 0 or len(running_tasks) > 0:
        while len(running_tasks) < conf['max_running_tasks'] and len(task_queue) > 0:
            task_name = task_queue.pop()
            task = conf['tasks'][task_name]
            image = task.get('image', conf['image'])
            entrypoint = task.get('entrypoint', conf['entrypoint'])
            env = {'KB_AUTH_TOKEN': task.get('KB_AUTH_TOKEN', conf.get('KB_AUTH_TOKEN')),
                   'KB_WORKSPACE_ID': task.get('KB_WORKSPACE_ID', conf.get('KB_WORKSPACE_ID')),
                   'ENVIRON': task.get('run_env', conf.get('run_env'))}
            logging.debug("Creating image:{0} entrypoint:{1} command: '{2}' env: {3}".format(
                image, entrypoint, task['command'], env))
            con_name = ConName(task_name)
            cls.cli.create_container(image=image, command=task['command'],
                                     entrypoint=entrypoint, environment=env,
                                     name=con_name)
            cls.container_list.append(con_name)
            cls.cli.start(con_name)
            logging.info("Started container {0}".format(con_name))
            running_tasks.append(con_name)
        finished = []
        # setup the timer for TimeoutHandler
        if 'timeout' in conf:
            signal.signal(signal.SIGALRM, TimeoutHandler)
            signal.alarm(conf['timeout'])
        try:
            while len(finished) == 0:
                for containerId in running_tasks:
                    state = cls.cli.inspect_container(containerId)
                    if not state['State']['Running']:
                        finished.append(containerId)
                if len(finished) == 0:
                    time.sleep(conf['poll_interval'])
        except TimeoutException:
            logging.info("Timeout triggered while waiting for containers: " +
                         ",".join(running_tasks))
            if conf['kill_on_timeout'] is True:
                for containerId in running_tasks:
                    logging.warning("Stopping container due to timeout: {}".format(containerId))
                    cls.cli.stop(containerId)
                time.sleep(10)  # docker waits 10 seconds before sending SIGKILL to container
        except Exception as e:
            raise e
        for cid in finished:
            running_tasks.remove(cid)
            logging.info("Container {0} exited".format(cid))
def _thd_stop_instance(self, instance, fast):
    docker_client = client.Client(**self.client_args)
    log.msg('Stopping container %s...' % instance['Id'][:6])
    docker_client.stop(instance['Id'])
    if not fast:
        docker_client.wait(instance['Id'])
    docker_client.remove_container(instance['Id'], v=True, force=True)
    if self.image is None:
        try:
            docker_client.remove_image(image=instance['image'])
        except docker.errors.APIError as e:
            log.msg('Error while removing the image: %s', e)
def build(client_url, repository, result, build_path, variables, tag):
    """
    Prepare and build a container based on a Dockerfile.
    """
    docker_client = client.Client()
    print "Building : '%s/%s'" % (repository, tag)
    save_in('%s/Dockerfile' % build_path, result)
    match, log = docker_client.build(path=build_path)
    if match is not None:
        print 'Container id : %s' % match
        print docker_client.tag(match, '%s/%s' % (repository, tag))
    print "Building is finished"
    return log
def get_images(self, show_all=False):
    c = client.Client(
        base_url='http://{0}:{1}'.format(self.hostname, self.port))
    key = IMAGE_KEY.format(self.name)
    images = cache.get(key)
    if images is None:
        try:
            # only show images with a repository name
            images = [
                x for x in c.images(all=show_all) if x.get('Repository')
            ]
            cache.set(key, images, HOST_CACHE_TTL)
        except requests.ConnectionError:
            images = []
    return images
def search_repository(request):
    '''
    Searches the docker index for repositories

    :param query: Query to search for
    '''
    query = request.GET.get('query', {})
    # get random host for query -- just needs a connection
    hosts = Host.objects.filter(enabled=True)
    rnd = random.randint(0, len(hosts) - 1)
    host = hosts[rnd]
    url = 'http://{0}:{1}'.format(host.hostname, host.port)
    c = client.Client(url, version='1.17')
    data = c.search(query)
    return HttpResponse(json.dumps(data), content_type='application/json')
def _thd_start_instance(self):
    docker_client = client.Client(base_url=self.docker_host)
    found = self._image_exists(docker_client)
    if (not found) and (self.dockerfile is not None):
        log.msg("Image '%s' not found, building it from scratch" %
                self.image)
        for line in docker_client.build(
                fileobj=BytesIO(self.dockerfile.encode('utf-8')),
                tag=self.image):
            log.msg(line)

    if not self._image_exists(docker_client):
        log.msg("Image '%s' not found" % self.image)
        raise interfaces.LatentBuildSlaveFailedToSubstantiate(
            'Image "%s" not found on docker host.' % self.image)

    volumes = {}
    binds = {}
    for volume_string in self.volumes:
        try:
            volume = volume_string.split(":")[1]
        except IndexError:
            log.err("Invalid volume definition for docker "
                    "{0}. Skipping...".format(volume_string))
            continue
        volumes[volume] = {}

        volume, bind = volume_string.split(':', 1)
        binds[volume] = bind

    instance = docker_client.create_container(
        self.image,
        self.command,
        volumes=volumes,
    )

    if instance.get('Id') is None:
        log.msg('Failed to create the container')
        raise interfaces.LatentBuildSlaveFailedToSubstantiate(
            'Failed to start container')
    log.msg('Container created, Id: %s...' % instance['Id'][:6])
    self.instance = instance
    docker_client.start(instance['Id'], binds=binds)
    return [instance['Id'], self.image]
def kill(container_id, url):
    if current_user.is_authenticated():
        try:
            c = client.Client(version="1.6",
                              base_url='http://%s:4243' % app.config['DOCKER_HOST'])
            c.kill(container_id)
            r = redis.StrictRedis(host=app.config['REDIS_HOST'],
                                  port=int(app.config['REDIS_PORT']))
            r.delete(url)
            r.lrem(current_user.email, 1, url)
        except Exception:
            print "unable to kill", container_id
        return redirect("/profile")
    else:
        return redirect("/")
def get_containers(self, show_all=False):
    c = client.Client(
        base_url='http://{0}:{1}'.format(self.hostname, self.port))
    key = self._generate_container_cache_key(show_all)
    containers = cache.get(key)
    container_ids = []
    if containers is None:
        containers = c.containers(all=show_all)
        # update meta data
        for x in containers:
            # only get first 12 chars of id (for metadata)
            c_id = utils.get_short_id(x.get('Id'))
            # ignore stopped containers
            meta = c.inspect_container(c_id)
            m, created = Container.objects.get_or_create(container_id=c_id,
                                                         host=self)
            m.meta = json.dumps(meta)
            m.save()
            container_ids.append(c_id)
        cache.set(key, containers, HOST_CACHE_TTL)
    return containers
def _thd_start_instance(self):
    docker_client = client.Client(**self.client_args)
    found = False
    if self.image is not None:
        found = self._image_exists(docker_client)
        image = self.image
    else:
        image = '%s_%s_image' % (self.slavename, id(self))
    if (not found) and (self.dockerfile is not None):
        log.msg("Image '%s' not found, building it from scratch" % image)
        for line in docker_client.build(
                fileobj=BytesIO(self.dockerfile.encode('utf-8')),
                tag=image):
            for streamline in handle_stream_line(line):
                log.msg(streamline)

    if not self._image_exists(docker_client, image):
        log.msg("Image '%s' not found" % image)
        raise interfaces.LatentBuildSlaveFailedToSubstantiate(
            'Image "%s" not found on docker host.' % image)

    instance = docker_client.create_container(
        image,
        self.command,
        name='%s_%s' % (self.slavename, id(self)),
        volumes=self.volumes,
    )

    if instance.get('Id') is None:
        log.msg('Failed to create the container')
        raise interfaces.LatentBuildSlaveFailedToSubstantiate(
            'Failed to start container')
    log.msg('Container created, Id: %s...' % instance['Id'][:6])
    instance['image'] = image
    self.instance = instance
    docker_client.start(instance['Id'], binds=self.binds)
    log.msg('Container started')
    return [instance['Id'], self.image]
def get_docker_client():
    kwargs = kwargs_from_env()
    if 'tls' in kwargs:
        # TODO, add an option to force tls.
        kwargs['tls'].assert_hostname = False
    return client.Client(version='auto', **kwargs)
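# Hedged usage sketch for the factory above: kwargs_from_env() reads
# DOCKER_HOST, DOCKER_TLS_VERIFY and DOCKER_CERT_PATH from the environment
# (e.g. as exported by docker-machine) and falls back to the local daemon
# defaults when none are set.
from docker import client
from docker.utils import kwargs_from_env

cli = get_docker_client()
print(cli.version().get('ApiVersion'))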
import json
import os

from flask import Flask, abort, request
from git import Git
from docker import client
from subprocess import Popen
import shlex

app = Flask(__name__)
app.debug = True

docker_client = client.Client()

SAVE_DIR_BASE = "dest"
GIT_REPO = "git://github.com/keeb/blog"


@app.route('/', methods=["POST"])
def index():
    info = json.loads(request.form['payload'])
    rep_id = info["commits"][0]["id"][0:5]
    save_dir = "%s/%s" % (SAVE_DIR_BASE, rep_id)
    tag = 'keeb/blog-snapshot-%s' % rep_id
    if not os.path.exists(save_dir):
        os.makedirs(save_dir)
    clone(GIT_REPO, save_dir)
def _getDockerClient(self):
    if docker.version[0] == '1':
        docker_client = client.Client(**self.client_args)
    else:
        docker_client = client.APIClient(**self.client_args)
    return docker_client
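# Hedged sketch of the same docker-py 1.x / 2.x split expressed as a
# module-level compatibility alias; it assumes only that docker.version is the
# usual "major.minor.patch" string.
import docker

if docker.version.split('.')[0] == '1':
    CompatClient = docker.Client      # docker-py 1.x low-level client
else:
    CompatClient = docker.APIClient   # docker >= 2.0 low-level client

cli = CompatClient(base_url='unix://var/run/docker.sock')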
from docker import client

cli = client.Client()
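# Minimal sketch of common calls on the old-style client created above
# (docker-py < 2.0 low-level API); it simply lists whatever containers and
# images the local daemon happens to have.
for container in cli.containers(all=True):
    print(container.get('Id', '')[:12], container.get('Status'))
for image in cli.images():
    print(image.get('Id', '')[:12], image.get('RepoTags'))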
def new(service):
    if current_user.is_authenticated():
        r = redis.StrictRedis(host=app.config['REDIS_HOST'],
                              port=int(app.config['REDIS_PORT']))
        c = client.Client(version="1.6",
                          base_url='http://%s:4243' % app.config['DOCKER_HOST'])
        exposed_ports = []

        # !! TODO try/except
        dockerfile = ""
        docker_path = ""
        if path.exists(path.join(app.config['SERVICES_FOLDER'], service,
                                 app.config['SERVICE_DICT']['dockerfile'])):
            dockerfile = "services/" + service + "/" + app.config['SERVICE_DICT']['dockerfile']
            docker_path = "services/" + service + "/docker/"
        elif path.exists(path.join(app.config['SERVICES_FOLDER'], service, "Dockerfile")):
            dockerfile = "services/" + service + "/Dockerfile"
            docker_path = "services/" + service + "/"
        # if dockerfile is still ""
        # docker index
        else:
            # !! TODO try/except
            service = service.replace("-", "/", 1)
            c.pull(service)
            container = c.create_container(service)
            container_id = container["Id"]
            c.start(container, publish_all_ports=True)
            b = c.inspect_container(container)
            ports = b['NetworkSettings']['PortMapping']['Tcp']
            for key, value in ports.items():
                exposed_ports.append(key)
            url = store_metadata(exposed_ports, c, r, container_id, service, container)
            return jsonify(url=url)

        with open(dockerfile, 'r') as content_file:
            for line in content_file:
                if line.startswith("EXPOSE"):
                    line = line.strip()
                    line_a = line.split(" ")
                    for port in line_a[1:]:
                        exposed_ports.append(port)

        container = []
        image_id_path = "services/" + service + "/.image_id"
        try:
            image_id = "JUNK"
            with open(image_id_path, 'r') as content_file:
                image_id = content_file.read()
            container = c.create_container(image_id)
        except Exception:
            # !! TODO try/except
            image_id, response = c.build(path=docker_path, tag=service)
            # !! TODO leaving in for debugging for now
            print image_id, response
            with open(image_id_path, 'w') as content_file:
                content_file.write(image_id)
            container = c.create_container(image_id)

        container_id = container["Id"]
        c.start(container, publish_all_ports=True)
        url = store_metadata(exposed_ports, c, r, container_id, service, container)
        return jsonify(url=url)
    else:
        return redirect("/")
def __init__(self):
    client = api_client.Client(base_url='unix://var/run/docker.sock')
    self.projects = projects.ProjectManager(client)
    self.images = images.ImageManager(client)
    self.containers = containers.ContainerManager(client)
def _get_client(self):
    url = '{0}:{1}'.format(self.hostname, self.port)
    if not url.startswith('http'):
        url = 'http://{0}'.format(url)
    return client.Client(url)
def _get_client(self):
    return client.Client()
def __init__(self, url='unix://var/run/docker.sock'):
    self.cli = client.Client(base_url=url)
def _thd_stop_instance(self, instance, fast):
    docker_client = client.Client(self.docker_host)
    log.msg('Stopping container %s...' % instance['Id'][:6])
    docker_client.stop(instance['Id'])
    if not fast:
        docker_client.wait(instance['Id'])