class Docker:
    """Wrapper around the docker-py ``Client`` used to manage kooplex images,
    volumes and containers.

    Configuration is read from the module-level ``KOOPLEX['docker']`` dict.
    The ``container`` / ``volume`` arguments of the methods below are project
    model objects exposing ``.name``, ``.volumes``, ``.mountpoint``, etc. --
    presumably kooplex ORM instances; confirm against the callers.
    """

    # Snapshot of the 'docker' section of the KOOPLEX settings (class-level).
    dockerconf = KOOPLEX.get('docker', {})

    def __init__(self):
        """Connect to the docker daemon at the configured ``base_url``."""
        base_url = self.dockerconf.get('base_url', '')
        self.client = Client(base_url=base_url)
        logger.debug("Client init")
        # Holds the result of the last exec_inspect() made by execute2().
        self.check = None

    def list_imagenames(self):
        """Yield the bare names of local images whose repo tag matches the
        configured ``pattern_imagename_filter`` regular expression."""
        logger.debug("Listing image names")
        pattern_imagenamefilter = KOOPLEX.get('docker', {}).get('pattern_imagename_filter', r'^image-%(\w+):\w$')
        for image in self.client.images(all=True):
            # Dangling images carry no repo tags; skip them.
            if image['RepoTags'] is None:
                continue
            for tag in image['RepoTags']:
                if re.match(pattern_imagenamefilter, tag):
                    # re.split() with a capturing pattern returns the captured
                    # image name as the middle element.
                    _, imagename, _ = re.split(pattern_imagenamefilter, tag)
                    logger.debug("Found image: %s" % imagename)
                    yield imagename

    def list_volumenames(self):
        """Yield the name of every docker volume known to the daemon."""
        logger.debug("Listing volume names")
        volumes = self.client.volumes()
        for volume in volumes['Volumes']:
            yield volume['Name']

    def create_volume(self, volume):
        """Create a docker volume named after the model object.

        Returns True unconditionally on success.
        NOTE(review): ``volume_dir`` is hard-coded to None, so the bind-mount
        branch below is currently dead code -- the plain named-volume branch
        always runs. Confirm whether the config lookup should be re-enabled.
        """
        volume_dir = None #self.dockerconf.get('volume_dir', '')
        if volume_dir:
            # Bind-mount a host directory as the volume backing store.
            self.client.create_volume(name=volume.name, driver='local', driver_opts={'device': '%s/%s/' % (volume_dir, volume.name), 'o': 'bind', 'type': 'none'})
        else:
            self.client.create_volume(name=volume.name, )
        logger.debug("Volume %s created" % volume.name)
        return True #self.get_container(container)

    def delete_volume(self, volume):
        """Remove the docker volume named after the model object."""
        self.client.remove_volume(name=volume.name)
        logger.debug("Volume %s deleted" % volume.name)

    def get_container(self, container):
        """Return the docker API record for *container*, or None if absent."""
        for item in self.client.containers(all=True):
            # docker API prepends '/' in front of container names
            if '/' + container.name in item['Names']:
                logger.debug("Get container %s" % container.name)
                return item
        return None

    def create_container(self, container):
        """Create (but do not start) the docker container for the model object
        and push its mount configuration; return the fresh API record."""
        volumes = []  # the list of mount points in the container
        binds = {}  # a mapping dictionary of the container mounts
        for volume in container.volumes:
            logger.debug("container %s, volume %s" % (container, volume))
            mp = volume.mountpoint
            volumes.append(mp)
            binds[volume.name] = {'bind': mp, 'mode': volume.mode(container.user)}
        logger.debug("container %s binds %s" % (container, binds))
        # Fixed resource limits for every container spawned here.
        host_config = self.client.create_host_config(
            binds=binds,
            privileged=True,
            mem_limit='2g',
            memswap_limit='170m',
            mem_swappiness=0,
            # oom_kill_disable = True,
            cpu_shares=2,
        )
        network = self.dockerconf.get('network', 'host')
        networking_config = {'EndpointsConfig': {network: {}}}
        ports = self.dockerconf.get('container_ports', [8000, 9000])
        # Fall back to the configured default image when the model has none.
        imagename = container.image.imagename if container.image else self.dockerconf.get('default_image', 'basic')
        args = {
            'name': container.name,
            'image': imagename,
            'detach': True,
            'hostname': container.name,
            'host_config': host_config,
            'networking_config': networking_config,
            'environment': container.environment,
            'volumes': volumes,
            'ports': ports,
        }
        self.client.create_container(**args)
        logger.debug("Container created")
        self.managemount(container)  #FIXME: check if not called twice
        return self.get_container(container)

    def _writefile(self, container_name, path, filename, content):
        """Copy *content* (bytes) into ``path/filename`` inside the named
        container via put_archive; errors are logged, not raised."""
        import tarfile
        import time
        from io import BytesIO
        # put_archive() only accepts a tar stream, so wrap the single file.
        tarstream = BytesIO()
        tar = tarfile.TarFile(fileobj=tarstream, mode='w')
        tarinfo = tarfile.TarInfo(name=filename)
        tarinfo.size = len(content)
        tarinfo.mtime = time.time()
        tar.addfile(tarinfo, BytesIO(content))
        tar.close()
        tarstream.seek(0)
        try:
            status = self.client.put_archive(container=container_name, path=path, data=tarstream)
            logger.info("container %s put_archive %s/%s returns %s" % (container_name, path, filename, status))
        except Exception as e:
            # Best-effort: failure to push the file is logged and swallowed.
            logger.error("container %s put_archive %s/%s fails -- %s" % (container_name, path, filename, e))

    def managemount(self, container):
        """Write the volume-type -> folder mapping file into the container,
        which an in-container mounter process reads."""
        from kooplex.lib.fs_dirname import Dirname
        path, filename = os.path.split(self.dockerconf.get('mountconf', '/tmp/mount.conf'))
        mapper = []
        for v in container.volumes:
            mapper.extend(["%s:%s" % (v.volumetype, d) for d in Dirname.containervolume_listfolders(container, v)])
        #NOTE: mounter uses read to process the mapper configuration, thus we need to make sure '\n' terminates the config mapper file
        mapper.append('')
        logger.debug("container %s map %s" % (container, mapper))
        file_data = "\n".join(mapper).encode('utf8')
        self._writefile(container.name, path, filename, file_data)

    def trigger_impersonator(self, vcproject):  #FIXME: dont call it 1-by-1
        """Write a git-command mapping file into the impersonator container so
        it can clone the version-controlled project on the user's behalf."""
        from kooplex.lib.fs_dirname import Dirname
        container_name = self.dockerconf.get('impersonator', 'impersonator')
        path, filename = os.path.split(self.dockerconf.get('gitcommandconf', '/tmp/gitcommand.conf'))
        cmdmaps = []
        token = vcproject.token
        fn_clonesh = os.path.join(Dirname.vcpcache(vcproject), "clone.sh")
        fn_key = os.path.join(Dirname.userhome(vcproject.token.user), '.ssh', token.fn_rsa)
        cmdmaps.append("%s:%s:%s:%s" % (token.user.username, fn_key, token.repository.domain, fn_clonesh))
        # Trailing '' -> trailing newline, same requirement as managemount().
        cmdmaps.append('')
        file_data = "\n".join(cmdmaps).encode('utf8')
        self._writefile(container_name, path, filename, file_data)

    def run_container(self, container):
        """Ensure the container exists, then start it when it is in the
        'Created' or 'Exited ...' state (judged from the 'Status' string)."""
        docker_container_info = self.get_container(container)
        if docker_container_info is None:
            logger.debug("Container did not exist, Creating new one")
            docker_container_info = self.create_container(container)
        container_state = docker_container_info['Status']
        if container_state == 'Created' or container_state.startswith('Exited'):
            logger.debug("Starting container")
            self.start_container(container)

    def refresh_container_state(self, container):
        """Copy the container's docker 'State' into the model object and save."""
        docker_container_info = self.get_container(container)
        container_state = docker_container_info['State']
        logger.debug("Container state %s" % container_state)
        container.last_message = str(container_state)
        container.last_message_at = now()
        container.save()

    def start_container(self, container):
        """Start the container and record its resulting state on the model.

        NOTE(review): the final ``assert`` is the only failure signal here and
        is stripped when Python runs with -O; consider raising instead.
        """
        self.client.start(container.name)
        # we need to retrieve the container state after starting it
        docker_container_info = self.get_container(container)
        container_state = docker_container_info['State']
        logger.debug("Container state %s" % container_state)
        container.last_message = str(container_state)
        container.last_message_at = now()
        assert container_state == 'running', "Container failed to start: %s" % docker_container_info

    def stop_container(self, container):
        """Stop the container; API errors are logged and stored on the model."""
        try:
            self.client.stop(container.name)
            container.last_message = 'Container stopped'
        except Exception as e:
            logger.warn("docker container not found by API -- %s" % e)
            container.last_message = str(e)

    def remove_container(self, container):
        """Remove the container; API errors are logged and stored on the model."""
        try:
            self.client.remove_container(container.name)
            container.last_message = 'Container removed'
            container.last_message_at = now()
        except Exception as e:
            logger.warn("docker container not found by API -- %s" % e)
            container.last_message = str(e)
            container.last_message_at = now()
        logger.debug("Container removed %s" % container.name)

    #FIXME: execute2 will be the real one... (translated from Hungarian)
    def execute(self, container, command):
        """Run *command* in the container and return the raw exec output."""
        logger.info("execution: %s in %s" % (command, container))
        execution = self.client.exec_create(container=container.name, cmd=shlex.split(command))
        return self.client.exec_start(execution, detach=False)

    def execute2(self, container, command):
        """Run *command* in the container, record the exec_inspect result in
        ``self.check``, log non-zero exit codes, and return decoded output."""
        logger.info("execution: %s in %s" % (command, container))
        execution = self.client.exec_create(container=container.name, cmd=shlex.split(command))
        response = self.client.exec_start(exec_id=execution['Id'], stream=False)
        check = self.client.exec_inspect(exec_id=execution['Id'])
        self.check = check
        if check['ExitCode'] != 0:
            logger.error('Execution %s in %s failed -- %s' % (command, container, check))
        return response.decode()
def getImages(self, daemon_address=None):
    """Return a Bag of all images known to the docker daemon.

    Each image record is wrapped in a Bag and stored under the keys
    'r_0', 'r_1', ... in enumeration order.
    """
    client = Client(daemon_address)
    images = Bag()
    index = 0
    for record in client.images():
        images['r_%i' % index] = Bag(record)
        index += 1
    return images
class Docker:
    """docker-py ``Client`` wrapper managing kooplex images and containers.

    NOTE(review): this file contains an earlier ``class Docker`` definition as
    well; at import time this later definition shadows it. This copy lacks the
    ``create_volume`` / ``delete_volume`` methods of the earlier one -- confirm
    which revision is intended to survive.
    """

    # Snapshot of the 'docker' section of the KOOPLEX settings (class-level).
    dockerconf = KOOPLEX.get('docker', {})

    def __init__(self):
        """Connect to the docker daemon at the configured ``base_url``."""
        base_url = self.dockerconf.get('base_url', '')
        self.client = Client(base_url = base_url)
        logger.debug("Client init")
        # Holds the result of the last exec_inspect() made by execute2().
        self.check = None

    def list_imagenames(self):
        """Yield names of local images whose tag matches the configured
        ``pattern_imagename_filter`` regular expression."""
        logger.debug("Listing image names")
        pattern_imagenamefilter = KOOPLEX.get('docker', {}).get('pattern_imagename_filter', r'^image-%(\w+):\w$')
        for image in self.client.images(all = True):
            # Dangling images carry no repo tags; skip them.
            if image['RepoTags'] is None:
                continue
            for tag in image['RepoTags']:
                if re.match(pattern_imagenamefilter, tag):
                    # The capturing group of the pattern yields the bare name.
                    _, imagename, _ = re.split(pattern_imagenamefilter, tag)
                    logger.debug("Found image: %s" % imagename)
                    yield imagename

    def list_volumenames(self):
        """Yield the name of every docker volume known to the daemon."""
        logger.debug("Listing volume names")
        volumes = self.client.volumes()
        for volume in volumes['Volumes']:
            yield volume['Name']

    def get_container(self, container):
        """Return the docker API record for *container*, or None if absent."""
        for item in self.client.containers(all = True):
            # docker API prepends '/' in front of container names
            if '/' + container.name in item['Names']:
                logger.debug("Get container %s" % container.name)
                return item
        return None

    def create_container(self, container):
        """Create (but do not start) the container, push its mount config and
        return the fresh docker API record."""
        volumes = []  # the list of mount points in the container
        binds = {}  # a mapping dictionary of the container mounts
        for volume in container.volumes:
            logger.debug("container %s, volume %s" % (container, volume))
            mp = volume.mountpoint
            volumes.append(mp)
            binds[volume.name] = {'bind': mp, 'mode': volume.mode(container.user)}
        logger.debug("container %s binds %s" % (container, binds))
        # Fixed resource limits for every container spawned here.
        host_config = self.client.create_host_config(
            binds = binds,
            privileged = True,
            mem_limit = '2g',
            memswap_limit = '170m',
            mem_swappiness = 0,
            # oom_kill_disable = True,
            cpu_shares = 2,
        )
        network = self.dockerconf.get('network', 'host')
        networking_config = { 'EndpointsConfig': { network: {} } }
        ports = self.dockerconf.get('container_ports', [ 8000, 9000 ])
        # Fall back to the configured default image when the model has none.
        imagename = container.image.imagename if container.image else self.dockerconf.get('default_image', 'basic')
        args = {
            'name': container.name,
            'image': imagename,
            'detach': True,
            'hostname': container.name,
            'host_config': host_config,
            'networking_config': networking_config,
            'environment': container.environment,
            'volumes': volumes,
            'ports': ports,
        }
        self.client.create_container(**args)
        logger.debug("Container created")
        self.managemount(container)  #FIXME: check if not called twice
        return self.get_container(container)

    def _writefile(self, container_name, path, filename, content):
        """Copy *content* (bytes) into ``path/filename`` inside the named
        container via put_archive; errors are logged, not raised."""
        import tarfile
        import time
        from io import BytesIO
        # put_archive() only accepts a tar stream, so wrap the single file.
        tarstream = BytesIO()
        tar = tarfile.TarFile(fileobj = tarstream, mode = 'w')
        tarinfo = tarfile.TarInfo(name = filename)
        tarinfo.size = len(content)
        tarinfo.mtime = time.time()
        tar.addfile(tarinfo, BytesIO(content))
        tar.close()
        tarstream.seek(0)
        try:
            status = self.client.put_archive(container = container_name, path = path, data = tarstream)
            logger.info("container %s put_archive %s/%s returns %s" % (container_name, path, filename, status))
        except Exception as e:
            # Best-effort: failure to push the file is logged and swallowed.
            logger.error("container %s put_archive %s/%s fails -- %s" % (container_name, path, filename, e))

    def managemount(self, container):
        """Write the volume-type -> folder mapping file into the container,
        which an in-container mounter process reads."""
        from kooplex.lib.fs_dirname import Dirname
        path, filename = os.path.split(self.dockerconf.get('mountconf', '/tmp/mount.conf'))
        mapper = []
        for v in container.volumes:
            mapper.extend([ "%s:%s" % (v.volumetype, d) for d in Dirname.containervolume_listfolders(container, v) ])
        #NOTE: mounter uses read to process the mapper configuration, thus we need to make sure '\n' terminates the config mapper file
        mapper.append('')
        logger.debug("container %s map %s" % (container, mapper))
        file_data = "\n".join(mapper).encode('utf8')
        self._writefile(container.name, path, filename, file_data)

    def trigger_impersonator(self, vcproject):  #FIXME: dont call it 1-by-1
        """Write a git-command mapping file into the impersonator container so
        it can clone the version-controlled project on the user's behalf."""
        from kooplex.lib.fs_dirname import Dirname
        container_name = self.dockerconf.get('impersonator', 'impersonator')
        path, filename = os.path.split(self.dockerconf.get('gitcommandconf', '/tmp/gitcommand.conf'))
        cmdmaps = []
        token = vcproject.token
        fn_clonesh = os.path.join(Dirname.vcpcache(vcproject), "clone.sh")
        fn_key = os.path.join(Dirname.userhome(vcproject.token.user), '.ssh', token.fn_rsa)
        cmdmaps.append("%s:%s:%s:%s" % (token.user.username, fn_key, token.repository.domain, fn_clonesh))
        # Trailing '' -> trailing newline, same requirement as managemount().
        cmdmaps.append('')
        file_data = "\n".join(cmdmaps).encode('utf8')
        self._writefile(container_name, path, filename, file_data)

    def run_container(self, container):
        """Ensure the container exists, then start it when it is in the
        'Created' or 'Exited ...' state (judged from the 'Status' string)."""
        docker_container_info = self.get_container(container)
        if docker_container_info is None:
            logger.debug("Container did not exist, Creating new one")
            docker_container_info = self.create_container(container)
        container_state = docker_container_info['Status']
        if container_state == 'Created' or container_state.startswith('Exited'):
            logger.debug("Starting container")
            self.start_container(container)

    def refresh_container_state(self, container):
        """Copy the container's docker 'State' into the model object and save."""
        docker_container_info = self.get_container(container)
        container_state = docker_container_info['State']
        logger.debug("Container state %s" % container_state)
        container.last_message = str(container_state)
        container.last_message_at = now()
        container.save()

    def start_container(self, container):
        """Start the container and record its resulting state on the model.

        NOTE(review): the final ``assert`` is the only failure signal here and
        is stripped when Python runs with -O; consider raising instead.
        """
        self.client.start(container.name)
        # we need to retrieve the container state after starting it
        docker_container_info = self.get_container(container)
        container_state = docker_container_info['State']
        logger.debug("Container state %s" % container_state)
        container.last_message = str(container_state)
        container.last_message_at = now()
        assert container_state == 'running', "Container failed to start: %s" % docker_container_info

    def stop_container(self, container):
        """Stop the container; API errors are logged and stored on the model."""
        try:
            self.client.stop(container.name)
            container.last_message = 'Container stopped'
        except Exception as e:
            logger.warn("docker container not found by API -- %s" % e)
            container.last_message = str(e)

    def remove_container(self, container):
        """Remove the container; API errors are logged and stored on the model."""
        try:
            self.client.remove_container(container.name)
            container.last_message = 'Container removed'
            container.last_message_at = now()
        except Exception as e:
            logger.warn("docker container not found by API -- %s" % e)
            container.last_message = str(e)
            container.last_message_at = now()
        logger.debug("Container removed %s" % container.name)

    #FIXME: execute2 will be the real one... (translated from Hungarian)
    def execute(self, container, command):
        """Run *command* in the container and return the raw exec output."""
        logger.info("execution: %s in %s" % (command, container))
        execution = self.client.exec_create(container = container.name, cmd = shlex.split(command))
        return self.client.exec_start(execution, detach = False)

    def execute2(self, container, command):
        """Run *command* in the container, record the exec_inspect result in
        ``self.check``, log non-zero exit codes, and return decoded output."""
        logger.info("execution: %s in %s" % (command, container))
        execution = self.client.exec_create(container = container.name, cmd = shlex.split(command))
        response = self.client.exec_start(exec_id = execution['Id'], stream = False)
        check = self.client.exec_inspect(exec_id = execution['Id'])
        self.check = check
        if check['ExitCode'] != 0:
            logger.error('Execution %s in %s failed -- %s' % (command, container, check))
        return response.decode()
class DockerPyClient(DockerClient):
    """docker-py backed implementation of the DockerClient interface.

    Wraps a low-level docker-py ``Client`` pinned to API version 1.15 and
    exposes the image/container operations used by the scheduler. ``entry``
    arguments are task-description dicts (keys like 'image', 'volumes',
    'portMappings') -- presumably parsed from a deployment config; confirm
    against the callers.
    """

    def __init__(self, remote, username=None, password=None, email=None):
        """Connect to the daemon at *remote*; optionally log in to a registry.

        FIX: the previous revision logged the registry *password* at debug
        level -- credentials must never be written to logs.
        """
        super(DockerPyClient, self).__init__()
        self.client = Client(base_url=remote, version='1.15')
        self.log = logging.getLogger(__name__)
        self.log.debug('remote = %s, username = %s', remote, username)
        if username:
            self.client.login(username=username, password=password, email=email)

    def docker_images(self, filters=None):
        """Return raw image records from the daemon, optionally filtered."""
        return self.client.images(filters=filters)

    def __id(self, ioc):
        # Extract the 'Id' field from an image-or-container record, or None.
        if ioc and 'Id' in ioc:
            return ioc['Id']
        return None

    def docker_containers(self):
        """Return a normalized summary dict for every container, running or not."""
        return [{
            'Id': cont['Id'],
            'Tag': cont['Image'],
            'Image': self.__id(self.image(cont['Image'])),
            'Names': cont['Names'],
            'Ports': cont['Ports'],
            'Created': cont['Created'],
            'Command': cont['Command'],
            'Status': cont['Status'],
            # 'Up ...' / 'Restarting ...' status strings mean a live container.
            'Running': cont['Status'].startswith('Up ') or cont['Status'].startswith('Restarting ')
        } for cont in self.client.containers(all=True)]

    def docker_pull(self, image):
        """Pull *image* from its registry.

        Returns True when the pull produced an image with a new Id (i.e. an
        update happened), False otherwise. Raises on any error reported in
        the pull stream.
        """
        (repository, tag) = self.tag(image)
        existing = self.image(image)
        for line in self.client.pull(repository=repository, stream=True, insecure_registry=True):
            parsed = json.loads(line)
            self.log.debug('parsed %s', parsed)
            if 'error' in parsed:
                raise Exception(parsed['error'])
        # Check if image updated
        self.flush_images()
        newer = self.image(image)
        if not existing or (newer['Id'] != existing['Id']):
            return True
        return False

    def docker_run(self, entry):
        """Create and start a container described by *entry*; return its Id."""
        # /var/log/ext is always exported so container logs can be collected.
        volumes = ['/var/log/ext']
        kwargs = {
            'image': entry['image'],
            'volumes': volumes,
            'detach': True,
            'environment': {'DOCKER_IMAGE': entry['image']}
        }
        if 'name' in entry:
            kwargs['name'] = entry['name']
        if 'env' in entry:
            kwargs['environment'].update(entry['env'])
        if 'cpu' in entry:
            kwargs['cpu_shares'] = entry['cpu']
        if 'memory' in entry:
            kwargs['mem_limit'] = entry['memory']
        if 'entrypoint' in entry:
            kwargs['entrypoint'] = entry['entrypoint']
        if 'command' in entry:
            kwargs['command'] = entry['command']
        if 'volumes' in entry:
            volumes.extend([vol['containerPath'] for vol in entry['volumes'] if 'containerPath' in vol])
            volsFrom = [vol['from'] for vol in entry['volumes'] if 'from' in vol]
            if len(volsFrom):
                kwargs['volumes_from'] = volsFrom
        if 'portMappings' in entry:
            kwargs['ports'] = [p['containerPort'] for p in entry['portMappings']]
        container = self.client.create_container(**kwargs)
        self.docker_start(container['Id'], entry)
        return container['Id']

    def docker_start(self, container, entry=None):
        """Start *container*, applying host-side settings (binds, ports, links,
        restart policy) taken from the optional *entry* description."""
        logsBound = False
        binds = {}
        restart_policy = 'on-failure'
        kwargs = {'container': container, 'binds': binds}
        if entry is not None:
            if 'network' in entry:
                kwargs['network_mode'] = entry['network']
            if 'privileged' in entry:
                kwargs['privileged'] = entry['privileged']
            if 'volumes' in entry:
                volsFrom = []
                for vol in entry['volumes']:
                    if 'from' in vol:
                        volsFrom.append(vol['from'])
                        continue
                    if not 'containerPath' in vol:
                        self.log.warn('No container mount point specified, skipping volume')
                        continue
                    if not 'hostPath' in vol:
                        # Just a local volume, no bindings
                        continue
                    binds[vol['hostPath']] = {
                        'bind': vol['containerPath'],
                        'ro': 'mode' in vol and vol['mode'].lower() == 'ro'
                    }
                    if vol['containerPath'] == '/var/log/ext':
                        logsBound = True
                if len(volsFrom):
                    kwargs['volumes_from'] = volsFrom
            if 'portMappings' in entry:
                portBinds = {}
                for pm in entry['portMappings']:
                    # Missing hostPort -> let docker pick an ephemeral port.
                    portBinds[pm['containerPort']] = pm['hostPort'] if 'hostPort' in pm else None
                kwargs['port_bindings'] = portBinds
            if 'links' in entry:
                kwargs['links'] = entry['links']
            if 'restart' in entry:
                restart_policy = entry['restart']
        kwargs['restart_policy'] = {'MaximumRetryCount': 0, 'Name': restart_policy}
        if not logsBound:
            # Default host-side log directory keyed by container id.
            binds['/var/log/ext/%s' % container] = {'bind': '/var/log/ext', 'ro': False}
        self.client.start(**kwargs)

    def docker_signal(self, container, sig='HUP'):
        """Send *sig* to the container's main process."""
        self.client.kill(container, sig)

    def docker_restart(self, container):
        """Restart the container."""
        self.client.restart(container)

    def docker_stop(self, container):
        """Stop the container."""
        self.client.stop(container)

    def docker_rm(self, container):
        """Remove the (stopped) container."""
        self.client.remove_container(container)

    def docker_rmi(self, image):
        # Force removal, sometimes conflicts result from truncated pulls when
        # dockerup container upgrades/dies
        self.client.remove_image(image, force=True)
class DockerPyClient(DockerClient):
    """docker-py backed implementation of the DockerClient interface.

    NOTE(review): this file defines ``DockerPyClient`` twice; this later copy
    shadows the earlier one at import time -- confirm which is intended.

    FIX: this revision used ``self.log`` in ``docker_start`` without ever
    assigning it, so warning about a volume with no 'containerPath' raised
    AttributeError. A logger is now created in ``__init__``.
    """

    def __init__(self, remote, username=None, password=None, email=None):
        """Connect to the daemon at *remote*; optionally log in to a registry."""
        # Local import keeps this fix self-contained even if the module header
        # does not import logging.
        import logging
        super(DockerPyClient, self).__init__()
        self.client = Client(base_url=remote, version='1.15')
        self.log = logging.getLogger(__name__)
        if username:
            self.client.login(username=username, password=password, email=email)

    def docker_images(self, filters=None):
        """Return raw image records from the daemon, optionally filtered."""
        return self.client.images(filters=filters)

    def __id(self, ioc):
        # Extract the 'Id' field from an image-or-container record, or None.
        if ioc and 'Id' in ioc:
            return ioc['Id']
        return None

    def docker_containers(self):
        """Return a normalized summary dict for every container, running or not."""
        return [{
            'Id': cont['Id'],
            'Tag': cont['Image'],
            'Image': self.__id(self.image(cont['Image'])),
            'Names': cont['Names'],
            'Ports': cont['Ports'],
            'Created': cont['Created'],
            'Command': cont['Command'],
            'Status': cont['Status'],
            # 'Up ...' / 'Restarting ...' status strings mean a live container.
            'Running': cont['Status'].startswith('Up ') or cont['Status'].startswith('Restarting ')
        } for cont in self.client.containers(all=True)]

    def docker_pull(self, image):
        """Pull *image*; return True when an image with a new Id arrived.

        Raises on any error reported in the pull stream.
        """
        (repository, tag) = self.tag(image)
        existing = self.image(image)
        for line in self.client.pull(repository=repository, stream=True, insecure_registry=True):
            parsed = json.loads(line)
            if 'error' in parsed:
                raise Exception(parsed['error'])
        # Check if image updated
        self.flush_images()
        newer = self.image(image)
        if not existing or (newer['Id'] != existing['Id']):
            return True
        return False

    def docker_run(self, entry):
        """Create and start a container described by *entry*; return its Id."""
        # /var/log/ext is always exported so container logs can be collected.
        volumes = ['/var/log/ext']
        kwargs = {
            'image': entry['image'],
            'volumes': volumes,
            'detach': True,
            'environment': {'DOCKER_IMAGE': entry['image']}
        }
        if 'name' in entry:
            kwargs['name'] = entry['name']
        if 'env' in entry:
            kwargs['environment'].update(entry['env'])
        if 'cpu' in entry:
            kwargs['cpu_shares'] = entry['cpu']
        if 'memory' in entry:
            kwargs['mem_limit'] = entry['memory']
        if 'entrypoint' in entry:
            kwargs['entrypoint'] = entry['entrypoint']
        if 'command' in entry:
            kwargs['command'] = entry['command']
        if 'volumes' in entry:
            volumes.extend([vol['containerPath'] for vol in entry['volumes'] if 'containerPath' in vol])
            volsFrom = [vol['from'] for vol in entry['volumes'] if 'from' in vol]
            if len(volsFrom):
                kwargs['volumes_from'] = volsFrom
        if 'portMappings' in entry:
            kwargs['ports'] = [p['containerPort'] for p in entry['portMappings']]
        container = self.client.create_container(**kwargs)
        self.docker_start(container['Id'], entry)
        return container['Id']

    def docker_start(self, container, entry=None):
        """Start *container*, applying host-side settings (binds, ports, links,
        restart policy) taken from the optional *entry* description."""
        logsBound = False
        binds = {}
        restart_policy = 'on-failure'
        kwargs = {'container': container, 'binds': binds}
        if entry is not None:
            if 'network' in entry:
                kwargs['network_mode'] = entry['network']
            if 'privileged' in entry:
                kwargs['privileged'] = entry['privileged']
            if 'volumes' in entry:
                volsFrom = []
                for vol in entry['volumes']:
                    if 'from' in vol:
                        volsFrom.append(vol['from'])
                        continue
                    if not 'containerPath' in vol:
                        self.log.warn('No container mount point specified, skipping volume')
                        continue
                    if not 'hostPath' in vol:
                        # Just a local volume, no bindings
                        continue
                    binds[vol['hostPath']] = {
                        'bind': vol['containerPath'],
                        'ro': 'mode' in vol and vol['mode'].lower() == 'ro'
                    }
                    if vol['containerPath'] == '/var/log/ext':
                        logsBound = True
                if len(volsFrom):
                    kwargs['volumes_from'] = volsFrom
            if 'portMappings' in entry:
                portBinds = {}
                for pm in entry['portMappings']:
                    # Missing hostPort -> let docker pick an ephemeral port.
                    portBinds[pm['containerPort']] = pm['hostPort'] if 'hostPort' in pm else None
                kwargs['port_bindings'] = portBinds
            if 'links' in entry:
                kwargs['links'] = entry['links']
            if 'restart' in entry:
                restart_policy = entry['restart']
        kwargs['restart_policy'] = {'MaximumRetryCount': 0, 'Name': restart_policy}
        if not logsBound:
            # Default host-side log directory keyed by container id.
            binds['/var/log/ext/%s' % container] = {'bind': '/var/log/ext', 'ro': False}
        self.client.start(**kwargs)

    def docker_signal(self, container, sig='HUP'):
        """Send *sig* to the container's main process."""
        self.client.kill(container, sig)

    def docker_restart(self, container):
        """Restart the container."""
        self.client.restart(container)

    def docker_stop(self, container):
        """Stop the container."""
        self.client.stop(container)

    def docker_rm(self, container):
        """Remove the (stopped) container."""
        self.client.remove_container(container)

    def docker_rmi(self, image):
        # Force removal, sometimes conflicts result from truncated pulls when
        # dockerup container upgrades/dies
        self.client.remove_image(image, force=True)
def getImages(self, daemon_address=None):
    """Collect every image reported by the docker daemon into a Bag.

    The i-th image record is wrapped as a Bag under the key 'r_i'.
    """
    result = Bag()
    for idx, record in enumerate(Client(daemon_address).images()):
        result['r_%i' % idx] = Bag(record)
    return result
class Backend(object):
    '''
    This is the base backend builder class which all builders inherit from.
    Contains utility functions and defines the public API.

    Subclasses must provide ``building`` (a short builder label) and
    ``base_image`` (the docker image to bootstrap from), plus the
    build_command()/setup_commands() hooks.
    '''
    # building = ''
    # base_image = ''

    def __init__(self, project):
        """Prepare directories and a docker client for building *project*.

        Raises NonexistantCheckout when the project checkout is missing.
        """
        self.project = project
        self.checkout_directory = project.get_checkout_directory()
        self.artifact_directory = project.get_artifact_directory()
        self.serve_directory = project.get_serve_directory()
        self.image_name = self.get_image_name()
        self.container_name = self.get_container_name()
        kwargs = kwargs_from_env()
        if settings.DEBUG and osx:
            # development helper for boot2docker users
            kwargs['tls'].assert_hostname = False
        self.docker = Client(**kwargs)
        if not os.path.exists(self.checkout_directory):
            raise NonexistantCheckout(
                'No such checkout: %s' % self.checkout_directory
            )
        if not os.path.exists(self.artifact_directory):
            os.makedirs(self.artifact_directory)
        if not os.path.exists(self.serve_directory):
            os.makedirs(self.serve_directory)

    def build_command(self):
        """Hook: the command that performs the actual build (subclass must override)."""
        raise NotImplementedError

    def setup_commands(self):
        """Hook: iterable of environment-setup commands (subclass must override)."""
        raise NotImplementedError

    def build(self):
        """Run a full build: set up the image, build, link artifacts.

        The temporary container is removed even when the build fails.
        """
        try:
            self.setup_container()
            command = self.build_command()
            proc = self.docker_run(command)
            stdout, stderr = proc.communicate()
            if proc.returncode != 0:
                raise Exception('Build failure: %s' % stderr)
            # Single-argument print() form works on both Python 2 and 3.
            print(stdout)
            self.link_artifacts()
        finally:
            self.remove_container()

    def get_image_name(self):
        ''' The image name is used for the persistent image shared between builds '''
        return 'berth/%s-project-%d' % (self.building, self.project.id)

    def get_container_name(self):
        ''' The container name is used for the temporary state between committed images '''
        return 'temp-%d' % self.project.id

    def link_artifacts(self):
        """Point the serve directory at the artifact directory via a symlink."""
        # FIX: the original compared against the misspelled name 'errono',
        # which raised NameError whenever os.unlink() failed.
        import errno
        try:
            os.unlink(self.serve_directory)
        except OSError as exc:
            # A missing link is fine; anything else is a real error.
            if exc.errno != errno.ENOENT:
                raise
        os.symlink(self.artifact_directory, self.serve_directory)

    def commit_container(self):
        """Commit the temporary container as the persistent build image."""
        proc = subprocess.Popen(
            ['docker', 'commit', self.container_name, self.image_name],
            stdout=subprocess.PIPE,
            stderr=subprocess.PIPE,
        )
        stdout, stderr = proc.communicate()
        if proc.returncode != 0:
            raise Exception('Could not commit container: %s' % stderr)

    def remove_container(self):
        """Delete the temporary build container."""
        proc = subprocess.Popen(
            ['docker', 'rm', self.container_name],
            stdout=subprocess.PIPE,
            stderr=subprocess.PIPE,
        )
        stdout, stderr = proc.communicate()
        if proc.returncode != 0:
            raise Exception('Could not remove container: %s' % stderr)

    def setup_container(self):
        """Run the setup commands, committing the container after each one so
        later commands (and future builds) start from the updated image."""
        if self.image_exists():
            image_name = self.image_name
        else:
            image_name = self.base_image
        for command in self.setup_commands():
            proc = self.docker_run(command, image_name)
            stdout, stderr = proc.communicate()
            if proc.returncode != 0:
                raise Exception('summin went wrong: %s\n%s' % (stdout, stderr))
            print(stdout)
            self.commit_container()
            self.remove_container()
            # After the first commit, subsequent commands run on our image.
            image_name = self.image_name

    def image_exists(self):
        """Return True when the persistent build image is already present."""
        return len(self.docker.images(name=self.image_name)) > 0

    def docker_run(self, command, image_name=None):
        """Start ``docker run`` with the build mounts; return the Popen handle."""
        if image_name is None:
            image_name = self.image_name
        cmd = [
            'docker', 'run',
            '-v', '%s:/root/build/docs' % self.checkout_directory,
            '-v', '%s:/root/build/artifacts' % self.artifact_directory,
            '-w', '/root/build/docs',
            '--name', self.container_name,
            image_name,
        ]
        cmd.extend(command)
        return subprocess.Popen(
            cmd,
            stdout=subprocess.PIPE,
            stderr=subprocess.PIPE,
            cwd=self.checkout_directory,
        )
class DockerController(object):
    '''
    Manages a pool of browser containers: discovers browser images by label,
    launches/expires containers, and coordinates a redis-backed client queue
    with throttling.
    '''

    def _load_config(self):
        '''Load the YAML browser config, letting environment variables
        (exact or upper-cased name) override individual keys.'''
        config = os.environ.get('BROWSER_CONFIG', './config.yaml')
        with open(config) as fh:
            # NOTE(review): yaml.load without an explicit Loader executes
            # arbitrary tags -- fine for a trusted local config file, but
            # consider yaml.safe_load if the file can come from users.
            config = yaml.load(fh)
        config = config['browser_config']
        for n, v in config.items():
            new_v = os.environ.get(n)
            if not new_v:
                new_v = os.environ.get(n.upper())
            if new_v:
                print('Setting Env Val: {0}={1}'.format(n, new_v))
                config[n] = new_v
        return config

    def __init__(self):
        config = self._load_config()

        self.name = config['cluster_name']
        self.label_name = config['label_name']
        self.init_req_expire_secs = config['init_req_expire_secs']
        self.queue_expire_secs = config['queue_expire_secs']
        self.remove_expired_secs = config['remove_expired_secs']
        self.api_version = config['api_version']
        self.ports = config['ports']
        # bind every exposed container port to an ephemeral host port
        self.port_bindings = dict((port, None) for port in self.ports.values())
        self.max_containers = config['max_containers']
        self.throttle_expire_secs = config['throttle_expire_secs']
        self.browser_image_prefix = config['browser_image_prefix']
        self.label_browser = config['label_browser']
        self.label_prefix = config['label_prefix']
        self.network_name = config['network_name']
        self.volume_source = config['browser_volumes']
        self.shm_size = config['shm_size']
        self.default_browser = config['default_browser']

        self._init_cli()

        # redis may still be loading its dataset at startup; retry until ready
        while True:
            try:
                self._init_redis(config)
                break
            except BusyLoadingError:
                print('Waiting for Redis to Load...')
                time.sleep(5)

    def _init_cli(self):
        '''Create the docker client, preferring the local unix socket.'''
        if os.path.exists('/var/run/docker.sock'):
            self.cli = Client(base_url='unix://var/run/docker.sock',
                              version=self.api_version)
        else:
            kwargs = kwargs_from_env(assert_hostname=False)
            kwargs['version'] = self.api_version
            self.cli = Client(**kwargs)

    def _init_redis(self, config):
        '''Connect to redis and seed counters/settings (idempotent).'''
        redis_url = os.environ['REDIS_BROWSER_URL']
        self.redis = redis.StrictRedis.from_url(redis_url,
                                                decode_responses=True)
        self.redis.setnx('next_client', '1')
        self.redis.setnx('max_containers', self.max_containers)
        self.redis.setnx('num_containers', '0')
        # TODO: support this
        #self.redis.set('cpu_auto_adjust', config['cpu_auto_adjust'])

        # if num_containers is invalid, reset to 0
        try:
            # BUGFIX: was `int(self.redis.get('num_containers') >= 0)` --
            # the comparison ran on the raw string (TypeError on py3), so
            # the counter was reset on every startup.
            assert int(self.redis.get('num_containers')) >= 0
        except Exception:
            self.redis.set('num_containers', 0)

        self.redis.set('throttle_samples', config['throttle_samples'])
        self.redis.set('throttle_max_avg', config['throttle_max_avg'])

        self.duration = int(config['container_expire_secs'])
        self.redis.set('container_expire_secs', self.duration)

    def load_avail_browsers(self, params=None):
        '''Return {id: props} of available browser images, optionally
        filtered by label key/value pairs in `params`.'''
        filters = {"dangling": False}
        if params:
            all_filters = []
            for k, v in params.items():
                # BUGFIX: was `('short')` (a plain string, so this was a
                # substring test); a one-element tuple is intended.
                if k not in ('short',):
                    all_filters.append(self.label_prefix + k + '=' + v)
            filters["label"] = all_filters
        else:
            filters["label"] = self.label_browser

        browsers = {}
        try:
            images = self.cli.images(filters=filters)
            for image in images:
                tags = image.get('RepoTags')
                id_ = self._get_primary_id(tags)
                if not id_:
                    continue
                props = self._browser_info(image['Labels'])
                props['id'] = id_
                browsers[id_] = props
        except Exception:
            traceback.print_exc()
        return browsers

    def _get_primary_id(self, tags):
        '''Pick the longest matching tag (minus the image prefix) as the
        browser id, ignoring non-prefixed tags; None when nothing matches.'''
        if not tags:
            return None
        primary_tag = None
        for tag in tags:
            if not tag:
                continue
            if tag.endswith(':latest'):
                tag = tag.replace(':latest', '')
            if not tag.startswith(self.browser_image_prefix):
                continue
            # pick the longest tag as primary tag
            if not primary_tag or len(tag) > len(primary_tag):
                primary_tag = tag
        if primary_tag:
            return primary_tag[len(self.browser_image_prefix):]
        else:
            return None

    def get_browser_info(self, name, include_icon=False):
        '''Inspect a single browser image and return its label-derived
        properties ({} if the image is missing).'''
        tag = self.browser_image_prefix + name
        try:
            image = self.cli.inspect_image(tag)
            tags = image.get('RepoTags')
            props = self._browser_info(image['Config']['Labels'],
                                       include_icon=include_icon)
            props['id'] = self._get_primary_id(tags)
            props['tags'] = tags
            return props
        except Exception:
            traceback.print_exc()
            return {}

    def _browser_info(self, labels, include_icon=False):
        '''Convert prefixed docker labels into a flat props dict; `caps.*`
        labels are additionally collected into a comma-separated string.'''
        props = {}
        caps = []
        for n, v in labels.items():
            wr_prop = n.split(self.label_prefix)
            if len(wr_prop) != 2:
                continue
            name = wr_prop[1]
            if not include_icon and name == 'icon':
                continue
            props[name] = v
            if name.startswith('caps.'):
                caps.append(name.split('.', 1)[1])
        props['caps'] = ', '.join(caps)
        return props

    def _get_host_port(self, info, port, default_host):
        '''Return "host:port" for a published container port, substituting
        `default_host` when docker reports the wildcard address.'''
        info = info['NetworkSettings']['Ports'][str(port) + '/tcp']
        info = info[0]
        host = info['HostIp']
        if host == '0.0.0.0' and default_host:
            host = default_host
        return host + ':' + info['HostPort']

    def _get_port(self, info, port):
        '''Return just the published host port for a container port.'''
        info = info['NetworkSettings']['Ports'][str(port) + '/tcp']
        info = info[0]
        return info['HostPort']

    def sid(self, id):
        '''Short (12-char) container id, matching docker's short ids.'''
        return id[:12]

    def timed_new_container(self, browser, env, host, reqid):
        '''Launch a container and record its startup duration for the
        throttling average.'''
        start = time.time()
        info = self.new_container(browser, env, host)
        end = time.time()
        dur = end - start

        time_key = 't:' + reqid
        self.redis.setex(time_key, self.throttle_expire_secs, dur)

        throttle_samples = int(self.redis.get('throttle_samples'))
        print('INIT DUR: ' + str(dur))
        self.redis.lpush('init_timings', time_key)
        self.redis.ltrim('init_timings', 0, throttle_samples - 1)
        return info

    def new_container(self, browser_id, env=None, default_host=None):
        '''Create and start a browser container; returns a result dict with
        id/ip/ports, or {} on failure (the partial container is removed).'''
        if env is None:
            # guard: the screen-size overrides below mutate env
            env = {}
        browser = self.get_browser_info(browser_id)
        # get default browser
        if not browser:
            # BUGFIX: fallback queried the same browser_id again; the
            # commented-out original line shows the default was intended.
            browser = self.get_browser_info(self.default_browser)

        if browser.get('req_width'):
            env['SCREEN_WIDTH'] = browser.get('req_width')
        if browser.get('req_height'):
            env['SCREEN_HEIGHT'] = browser.get('req_height')

        image = browser['tags'][0]
        print('Launching ' + image)

        short_id = None
        try:
            host_config = self.create_host_config()
            container = self.cli.create_container(
                image=image,
                ports=list(self.ports.values()),
                environment=env,
                host_config=host_config,
                labels={self.label_name: self.name},
            )
            id_ = container.get('Id')
            short_id = self.sid(id_)
            res = self.cli.start(container=id_)
            info = self.cli.inspect_container(id_)
            ip = info['NetworkSettings']['IPAddress']
            if not ip:
                # with a user-defined network the address lives elsewhere
                ip = info['NetworkSettings']['Networks'][
                    self.network_name]['IPAddress']
            self.redis.hset('all_containers', short_id, ip)

            result = {}
            for port_name in self.ports:
                result[port_name + '_host'] = self._get_host_port(
                    info, self.ports[port_name], default_host)
            result['id'] = short_id
            result['ip'] = ip
            result['audio'] = os.environ.get('AUDIO_TYPE', '')
            return result
        except Exception as e:
            traceback.print_exc()
            if short_id:
                print('EXCEPTION: ' + short_id)
                self.remove_container(short_id)
            return {}

    def create_host_config(self):
        '''Build the host config shared by all browser containers.'''
        if self.volume_source:
            volumes_from = [self.volume_source]
        else:
            volumes_from = None
        host_config = self.cli.create_host_config(
            port_bindings=self.port_bindings,
            volumes_from=volumes_from,
            network_mode=self.network_name,
            shm_size=self.shm_size,
            cap_add=['ALL'],
            security_opt=['apparmor=unconfined'],
        )
        return host_config

    def remove_container(self, short_id):
        '''Force-remove a container and clean up its redis bookkeeping.'''
        print('REMOVING: ' + short_id)
        try:
            self.cli.remove_container(short_id, force=True)
        except Exception as e:
            print(e)

        reqid = None
        ip = self.redis.hget('all_containers', short_id)
        if ip:
            reqid = self.redis.hget('ip:' + ip, 'reqid')

        with redis.utils.pipeline(self.redis) as pi:
            pi.delete('ct:' + short_id)
            if not ip:
                return
            pi.hdel('all_containers', short_id)
            pi.delete('ip:' + ip)
            if reqid:
                pi.delete('req:' + reqid)

    def event_loop(self):
        '''Blocking loop over docker events; each event is handled
        best-effort so one failure doesn't kill the loop.'''
        for event in self.cli.events(decode=True):
            try:
                self.handle_docker_event(event)
            except Exception as e:
                print(e)

    def handle_docker_event(self, event):
        '''Track our containers' lifecycle: 'die' removes + decrements the
        counter, 'start' increments it and arms the expiry key.'''
        if event['Type'] != 'container':
            return
        if (event['status'] == 'die'
                and event['from'].startswith(self.browser_image_prefix)
                and event['Actor']['Attributes'].get(
                    self.label_name) == self.name):
            short_id = self.sid(event['id'])
            print('EXITED: ' + short_id)
            self.remove_container(short_id)
            self.redis.decr('num_containers')
            return
        if (event['status'] == 'start'
                and event['from'].startswith(self.browser_image_prefix)
                and event['Actor']['Attributes'].get(
                    self.label_name) == self.name):
            short_id = self.sid(event['id'])
            print('STARTED: ' + short_id)
            self.redis.incr('num_containers')
            self.redis.setex('ct:' + short_id, self.duration, 1)
            return

    def remove_expired_loop(self):
        '''Background loop periodically sweeping expired/stale containers.'''
        while True:
            try:
                self.remove_expired()
            except Exception as e:
                print(e)
            time.sleep(self.remove_expired_secs)

    def remove_expired(self):
        '''Remove containers whose 'ct:' key expired, plus redis entries for
        containers docker no longer knows about.'''
        all_known_ids = self.redis.hkeys('all_containers')
        all_containers = {
            self.sid(c['Id']) for c in self.cli.containers(quiet=True)
        }
        for short_id in all_known_ids:
            if not self.redis.get('ct:' + short_id):
                print('TIME EXPIRED: ' + short_id)
                self.remove_container(short_id)
            elif short_id not in all_containers:
                print('STALE ID: ' + short_id)
                self.remove_container(short_id)

    def auto_adjust_max(self):
        '''Optionally scale max_containers with the host's CPU count
        (enabled via the 'cpu_auto_adjust' redis key).'''
        print('Auto-Adjust Max Loop')
        try:
            scale = self.redis.get('cpu_auto_adjust')
            if not scale:
                return
            info = self.cli.info()
            cpus = int(info.get('NCPU', 0))
            if cpus <= 1:
                return
            total = int(float(scale) * cpus)
            self.redis.set('max_containers', total)
        except Exception as e:
            traceback.print_exc()

    def add_new_client(self, reqid):
        '''Assign the request the next queue number and arm its expiries.'''
        client_id = self.redis.incr('clients')
        #enc_id = base64.b64encode(os.urandom(27)).decode('utf-8')
        self.redis.setex('cm:' + reqid, self.queue_expire_secs, client_id)
        self.redis.setex('q:' + str(client_id), self.queue_expire_secs, 1)
        return client_id

    def _make_reqid(self):
        '''Random url-safe request id (base32, 24 chars).'''
        return base64.b32encode(os.urandom(15)).decode('utf-8')

    def _make_vnc_pass(self):
        '''Random VNC password (base64, 28 chars).'''
        return base64.b64encode(os.urandom(21)).decode('utf-8')

    def register_request(self, container_data):
        '''Store a pending container request under a fresh reqid.'''
        reqid = self._make_reqid()
        container_data['reqid'] = reqid
        self.redis.hmset('req:' + reqid, container_data)
        self.redis.expire('req:' + reqid, self.init_req_expire_secs)
        return reqid

    def am_i_next(self, reqid):
        '''Advance the FIFO queue; returns -1 when this request may launch
        now, otherwise its distance from the front of the queue.'''
        client_id = self.redis.get('cm:' + reqid)
        if not client_id:
            client_id = self.add_new_client(reqid)
        else:
            self.redis.expire('cm:' + reqid, self.queue_expire_secs)
        client_id = int(client_id)
        next_client = int(self.redis.get('next_client'))

        # not next client
        if client_id != next_client:
            # if this client expired, delete it from queue
            if not self.redis.get('q:' + str(next_client)):
                print('skipping expired', next_client)
                self.redis.incr('next_client')
            # missed your number somehow, get a new one!
            if client_id < next_client:
                client_id = self.add_new_client(reqid)

        diff = client_id - next_client

        if self.throttle():
            self.redis.expire('q:' + str(client_id), self.queue_expire_secs)
            return client_id - next_client

        #num_containers = self.redis.hlen('all_containers')
        num_containers = int(self.redis.get('num_containers'))

        max_containers = self.redis.get('max_containers')
        max_containers = int(
            max_containers) if max_containers else self.max_containers

        if diff <= (max_containers - num_containers):
            self.redis.incr('next_client')
            return -1
        else:
            self.redis.expire('q:' + str(client_id), self.queue_expire_secs)
            return client_id - next_client

    def throttle(self):
        '''True when the average recent container-init time exceeds the
        configured maximum (launches should be held back).'''
        timings = self.redis.lrange('init_timings', 0, -1)
        if not timings:
            return False
        timings = self.redis.mget(*timings)

        avg = 0
        count = 0
        for val in timings:
            if val is not None:
                avg += float(val)
                count += 1
        if count == 0:
            return False
        avg = avg / count
        print('AVG: ', avg)
        throttle_max_avg = float(self.redis.get('throttle_max_avg'))
        if avg >= throttle_max_avg:
            print('Throttling, too slow...')
            return True
        return False

    def _copy_env(self, env, name, override=None):
        '''Copy a process env var into `env`, with optional override.'''
        env[name] = override or os.environ.get(name)

    def init_new_browser(self, reqid, host, width=None, height=None):
        '''Launch (or reconnect to) the browser for a registered request;
        returns the container info, a queue position, or None if unknown.'''
        req_key = 'req:' + reqid
        container_data = self.redis.hgetall(req_key)
        if not container_data:
            return None

        # already started, attempt to reconnect
        if 'queue' in container_data:
            container_data['ttl'] = self.redis.ttl('ct:' +
                                                   container_data['id'])
            return container_data

        queue_pos = self.am_i_next(reqid)
        if queue_pos >= 0:
            return {'queue': queue_pos}

        browser = container_data['browser']
        url = container_data.get('url', 'about:blank')
        ts = container_data.get('request_ts')

        env = {}
        env['URL'] = url
        env['TS'] = ts
        env['BROWSER'] = browser
        vnc_pass = self._make_vnc_pass()
        env['VNC_PASS'] = vnc_pass
        self._copy_env(env, 'PROXY_HOST')
        self._copy_env(env, 'PROXY_PORT')
        self._copy_env(env, 'PROXY_GET_CA')
        self._copy_env(env, 'SCREEN_WIDTH', width)
        self._copy_env(env, 'SCREEN_HEIGHT', height)
        self._copy_env(env, 'IDLE_TIMEOUT')
        self._copy_env(env, 'AUDIO_TYPE')

        info = self.timed_new_container(browser, env, host, reqid)
        info['queue'] = 0
        info['vnc_pass'] = vnc_pass

        new_key = 'ip:' + info['ip']
        # TODO: support different durations?
        self.duration = int(self.redis.get('container_expire_secs'))

        with redis.utils.pipeline(self.redis) as pi:
            pi.rename(req_key, new_key)
            pi.persist(new_key)
            pi.hmset(req_key, info)
            pi.expire(req_key, self.duration)

        info['ttl'] = self.duration
        return info

    def clone_browser(self, reqid, id_, name):
        '''Kill the running browser process inside the request's container
        and commit the container as a user image; returns status dict.'''
        short_id = self.redis.hget('req:' + reqid, 'id')
        #try:
        #    container = self.cli.containers.get(short_id)
        #except Exception as e:
        #    print(e)
        #    print('Container Not Found: ' + short_id)
        #    return {'error': str(e)}

        env = {}
        self._copy_env(env, 'PROXY_HOST')
        self._copy_env(env, 'PROXY_PORT')
        self._copy_env(env, 'PROXY_GET_CA')
        self._copy_env(env, 'SCREEN_WIDTH')
        self._copy_env(env, 'SCREEN_HEIGHT')
        self._copy_env(env, 'IDLE_TIMEOUT')
        self._copy_env(env, 'AUDIO_TYPE')

        env_list = []
        for n, v in env.items():
            if n and v:
                env_list.append(n + '=' + v)

        config = {'Env': env_list, 'Labels': {'wr.name': name}}

        try:
            exec_id = self.cli.exec_create(
                container=short_id,
                cmd="bash -c 'kill $(cat /tmp/browser_pid)'")
            self.cli.exec_start(exec_id=exec_id['Id'],
                                detach=False,
                                tty=False)
            time.sleep(0.5)
        except Exception as e:
            print(e)

        try:
            res = self.cli.commit(container=short_id,
                                  repository='oldwebtoday/user/' + id_,
                                  conf=config)
            return {'success': '1'}
        except Exception as e:
            print(e)
            return {'error': str(e)}

    def get_random_browser(self):
        '''Pick a random available browser id, skipping ones flagged
        `skip_random`.'''
        browsers = self.load_avail_browsers()
        while True:
            # BUGFIX: random.choice(browsers.keys()) fails on py3 because
            # dict views are not sequences.
            id_ = random.choice(list(browsers.keys()))
            if browsers[id_].get('skip_random'):
                continue
            return id_