# Assumed module-level imports, inferred from usage; the original snippet omits
# them. KOOPLEX (the project settings dict) and the Dirname helper imported
# inside some methods come from the surrounding kooplex project; Client is the
# docker-py (< 2.0) low-level client; now() is assumed to be Django's timezone
# helper, matching the model fields updated below.
import logging
import os
import re
import shlex

from docker import Client
from django.utils.timezone import now

logger = logging.getLogger(__name__)


class Docker:
    dockerconf = KOOPLEX.get('docker', {})

    def __init__(self):
        base_url = self.dockerconf.get('base_url', '')
        self.client = Client(base_url=base_url)
        logger.debug("Client init")
        self.check = None

    def list_imagenames(self):
        logger.debug("Listing image names")
        pattern_imagenamefilter = KOOPLEX.get('docker', {}).get('pattern_imagename_filter', r'^image-%(\w+):\w$')
        for image in self.client.images(all=True):
            if image['RepoTags'] is None:
                continue
            for tag in image['RepoTags']:
                if re.match(pattern_imagenamefilter, tag):
                    _, imagename, _ = re.split(pattern_imagenamefilter, tag)
                    logger.debug("Found image: %s" % imagename)
                    yield imagename

    def list_volumenames(self):
        logger.debug("Listing volume names")
        volumes = self.client.volumes()
        for volume in volumes['Volumes']:
            yield volume['Name']

    def create_volume(self, volume):
        volume_dir = None  #self.dockerconf.get('volume_dir', '')
        if volume_dir:
            self.client.create_volume(name=volume.name, driver='local', driver_opts={
                'device': '%s/%s/' % (volume_dir, volume.name),
                'o': 'bind',
                'type': 'none'
            })
        else:
            self.client.create_volume(name=volume.name)
        logger.debug("Volume %s created" % volume.name)
        return True  #self.get_container(container)

    def delete_volume(self, volume):
        self.client.remove_volume(name=volume.name)
        logger.debug("Volume %s deleted" % volume.name)

    def get_container(self, container):
        for item in self.client.containers(all=True):
            # docker API prepends '/' in front of container names
            if '/' + container.name in item['Names']:
                logger.debug("Get container %s" % container.name)
                return item
        return None

    def create_container(self, container):
        volumes = []  # the list of mount points in the container
        binds = {}    # a mapping dictionary of the container mounts
        for volume in container.volumes:
            logger.debug("container %s, volume %s" % (container, volume))
            mp = volume.mountpoint
            volumes.append(mp)
            binds[volume.name] = {'bind': mp, 'mode': volume.mode(container.user)}
        logger.debug("container %s binds %s" % (container, binds))
        host_config = self.client.create_host_config(
            binds=binds,
            privileged=True,
            mem_limit='2g',
            memswap_limit='170m',
            mem_swappiness=0,
            # oom_kill_disable=True,
            cpu_shares=2,
        )
        network = self.dockerconf.get('network', 'host')
        networking_config = {'EndpointsConfig': {network: {}}}
        ports = self.dockerconf.get('container_ports', [8000, 9000])
        imagename = container.image.imagename if container.image else self.dockerconf.get('default_image', 'basic')
        args = {
            'name': container.name,
            'image': imagename,
            'detach': True,
            'hostname': container.name,
            'host_config': host_config,
            'networking_config': networking_config,
            'environment': container.environment,
            'volumes': volumes,
            'ports': ports,
        }
        self.client.create_container(**args)
        logger.debug("Container created")
        self.managemount(container)  #FIXME: check if not called twice
        return self.get_container(container)

    def _writefile(self, container_name, path, filename, content):
        import tarfile
        import time
        from io import BytesIO
        tarstream = BytesIO()
        tar = tarfile.TarFile(fileobj=tarstream, mode='w')
        tarinfo = tarfile.TarInfo(name=filename)
        tarinfo.size = len(content)
        tarinfo.mtime = time.time()
        tar.addfile(tarinfo, BytesIO(content))
        tar.close()
        tarstream.seek(0)
        try:
            status = self.client.put_archive(container=container_name, path=path, data=tarstream)
            logger.info("container %s put_archive %s/%s returns %s" % (container_name, path, filename, status))
        except Exception as e:
            logger.error("container %s put_archive %s/%s fails -- %s" % (container_name, path, filename, e))

    def managemount(self, container):
        from kooplex.lib.fs_dirname import Dirname
        path, filename = os.path.split(self.dockerconf.get('mountconf', '/tmp/mount.conf'))
        mapper = []
        for v in container.volumes:
            mapper.extend(["%s:%s" % (v.volumetype, d) for d in Dirname.containervolume_listfolders(container, v)])
        #NOTE: mounter uses read to process the mapper configuration, thus we need to make sure '\n' terminates the config mapper file
        mapper.append('')
        logger.debug("container %s map %s" % (container, mapper))
        file_data = "\n".join(mapper).encode('utf8')
        self._writefile(container.name, path, filename, file_data)

    def trigger_impersonator(self, vcproject):  #FIXME: don't call it 1-by-1
        from kooplex.lib.fs_dirname import Dirname
        container_name = self.dockerconf.get('impersonator', 'impersonator')
        path, filename = os.path.split(self.dockerconf.get('gitcommandconf', '/tmp/gitcommand.conf'))
        cmdmaps = []
        token = vcproject.token
        fn_clonesh = os.path.join(Dirname.vcpcache(vcproject), "clone.sh")
        fn_key = os.path.join(Dirname.userhome(vcproject.token.user), '.ssh', token.fn_rsa)
        cmdmaps.append("%s:%s:%s:%s" % (token.user.username, fn_key, token.repository.domain, fn_clonesh))
        cmdmaps.append('')
        file_data = "\n".join(cmdmaps).encode('utf8')
        self._writefile(container_name, path, filename, file_data)

    def run_container(self, container):
        docker_container_info = self.get_container(container)
        if docker_container_info is None:
            logger.debug("Container did not exist, creating a new one")
            docker_container_info = self.create_container(container)
        container_state = docker_container_info['Status']
        if container_state == 'Created' or container_state.startswith('Exited'):
            logger.debug("Starting container")
            self.start_container(container)

    def refresh_container_state(self, container):
        docker_container_info = self.get_container(container)
        container_state = docker_container_info['State']
        logger.debug("Container state %s" % container_state)
        container.last_message = str(container_state)
        container.last_message_at = now()
        container.save()

    def start_container(self, container):
        self.client.start(container.name)
        # we need to retrieve the container state after starting it
        docker_container_info = self.get_container(container)
        container_state = docker_container_info['State']
        logger.debug("Container state %s" % container_state)
        container.last_message = str(container_state)
        container.last_message_at = now()
        assert container_state == 'running', "Container failed to start: %s" % docker_container_info

    def stop_container(self, container):
        try:
            self.client.stop(container.name)
            container.last_message = 'Container stopped'
        except Exception as e:
            logger.warn("docker container not found by API -- %s" % e)
            container.last_message = str(e)

    def remove_container(self, container):
        try:
            self.client.remove_container(container.name)
            container.last_message = 'Container removed'
            container.last_message_at = now()
        except Exception as e:
            logger.warn("docker container not found by API -- %s" % e)
            container.last_message = str(e)
            container.last_message_at = now()
        logger.debug("Container removed %s" % container.name)

    #FIXME: execute2 will be the real one...
    def execute(self, container, command):
        logger.info("execution: %s in %s" % (command, container))
        execution = self.client.exec_create(container=container.name, cmd=shlex.split(command))
        return self.client.exec_start(execution, detach=False)

    def execute2(self, container, command):
        logger.info("execution: %s in %s" % (command, container))
        execution = self.client.exec_create(container=container.name, cmd=shlex.split(command))
        response = self.client.exec_start(exec_id=execution['Id'], stream=False)
        check = self.client.exec_inspect(exec_id=execution['Id'])
        self.check = check
        if check['ExitCode'] != 0:
            logger.error('Execution %s in %s failed -- %s' % (command, container, check))
        return response.decode()
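
# Illustrative usage sketch (not part of the original module): enumerate the
# images and volumes the wrapper can see. Assumes the KOOPLEX settings point at
# a reachable Docker daemon; the Container/Volume model objects required by the
# other methods are not constructed here.
if __name__ == '__main__':
    docker = Docker()
    for imagename in docker.list_imagenames():
        print('image:', imagename)
    for volumename in docker.list_volumenames():
        print('volume:', volumename)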
# Assumed module-level imports, inferred from usage; the original snippet omits
# them. `dc` referenced in do_init() is the module-level DockerController
# instance the application creates elsewhere.
import base64
import os
import random
import time
import traceback

import redis
import yaml
from docker import Client
from docker.utils import kwargs_from_env


class DockerController(object):
    def _load_config(self):
        with open('./config.yaml') as fh:
            config = yaml.load(fh)
        return config

    def __init__(self):
        config = self._load_config()

        self.LOCAL_REDIS_HOST = 'netcapsule_redis_1'

        self.REDIS_HOST = os.environ.get('REDIS_HOST', self.LOCAL_REDIS_HOST)
        self.PYWB_HOST = os.environ.get('PYWB_HOST', 'netcapsule_pywb_1')

        self.C_EXPIRE_TIME = config['init_container_expire_secs']
        self.Q_EXPIRE_TIME = config['queue_expire_secs']
        self.REMOVE_EXP_TIME = config['remove_expired_secs']

        self.VERSION = config['api_version']

        self.VNC_PORT = config['vnc_port']
        self.CMD_PORT = config['cmd_port']

        self.MAX_CONT = config['max_containers']

        self.image_prefix = config['image_prefix']

        self.browser_list = config['browsers']
        self.browser_paths = {}

        for browser in self.browser_list:
            path = browser['path']
            if path in self.browser_paths:
                raise Exception('Already a browser for path {0}'.format(path))
            self.browser_paths[path] = browser

        self.default_browser = config['default_browser']
        self.redirect_paths = config['redirect_paths']

        self.randompages = []
        try:
            with open(config['random_page_file']) as fh:
                self.randompages = list([line.rstrip() for line in fh])
        except Exception as e:
            print(e)

        self.redis = redis.StrictRedis(host=self.REDIS_HOST)

        self.redis.setnx('next_client', '1')
        self.redis.setnx('max_containers', self.MAX_CONT)
        self.redis.setnx('num_containers', '0')
        self.redis.setnx('cpu_auto_adjust', 5.5)

        throttle_samples = config['throttle_samples']
        self.redis.setnx('throttle_samples', throttle_samples)

        throttle_max_avg = config['throttle_max_avg']
        self.redis.setnx('throttle_max_avg', throttle_max_avg)

        self.redis.setnx('container_expire_secs', config['full_container_expire_secs'])

        self.T_EXPIRE_TIME = config['throttle_expire_secs']

        if os.path.exists('/var/run/docker.sock'):
            self.cli = Client(base_url='unix://var/run/docker.sock', version=self.VERSION)
        else:
            kwargs = kwargs_from_env(assert_hostname=False)
            kwargs['version'] = self.VERSION
            self.cli = Client(**kwargs)

    def _get_host_port(self, info, port, default_host):
        info = info['NetworkSettings']['Ports'][str(port) + '/tcp']
        info = info[0]
        host = info['HostIp']
        if host == '0.0.0.0' and default_host:
            host = default_host
        return host + ':' + info['HostPort']

    def timed_new_container(self, browser, env, host, client_id):
        start = time.time()
        info = self.new_container(browser, env, host)
        end = time.time()

        dur = end - start
        time_key = 't:' + client_id
        self.redis.setex(time_key, self.T_EXPIRE_TIME, dur)

        throttle_samples = int(self.redis.get('throttle_samples'))
        print('INIT DUR: ' + str(dur))
        self.redis.lpush('init_timings', time_key)
        self.redis.ltrim('init_timings', 0, throttle_samples - 1)

        return info

    def new_container(self, browser_id, env=None, default_host=None):
        browser = self.browser_paths.get(browser_id)

        # get default browser
        if not browser:
            browser = self.browser_paths.get(self.default_browser)

        if browser.get('req_width'):
            env['SCREEN_WIDTH'] = browser.get('req_width')

        if browser.get('req_height'):
            env['SCREEN_HEIGHT'] = browser.get('req_height')

        container = self.cli.create_container(image=self.image_prefix + '/' + browser['id'],
                                              ports=[self.VNC_PORT, self.CMD_PORT],
                                              environment=env,
                                              )
        short_id = None
        try:
            id_ = container.get('Id')
            short_id = id_[:12]
            res = self.cli.start(container=id_,
                                 port_bindings={self.VNC_PORT: None, self.CMD_PORT: None},
                                 volumes_from=['netcapsule_shared_data_1'],
                                 network_mode='netcapsule',
                                 )
            info = self.cli.inspect_container(id_)
            ip = info['NetworkSettings']['IPAddress']
            if not ip:
                ip = info['NetworkSettings']['Networks']['netcapsule']['IPAddress']

            #self.redis.hset('all_containers', short_id, ip)
            self.redis.incr('num_containers')
            self.redis.setex('c:' + short_id, self.C_EXPIRE_TIME, 1)

            return {'vnc_host': self._get_host_port(info, self.VNC_PORT, default_host),
                    'cmd_host': self._get_host_port(info, self.CMD_PORT, default_host),
                    }
        except Exception as e:
            if short_id:
                self.remove_container(short_id)
            traceback.print_exc()
            return {}

    def remove_container(self, short_id, ip=None):
        print('REMOVING ' + short_id)
        try:
            self.cli.remove_container(short_id, force=True)
        except Exception as e:
            print(e)

        #self.redis.hdel('all_containers', short_id)
        self.redis.delete('c:' + short_id)

        if ip:
            ip_keys = self.redis.keys(ip + ':*')
            for key in ip_keys:
                self.redis.delete(key)

    def remove_expired(self):
        print('Start Expired Check')
        while True:
            try:
                value = self.redis.blpop('remove_q', 1000)
                if not value:
                    continue

                short_id, ip = value[1].split(' ')
                self.remove_container(short_id, ip)
                self.redis.decr('num_containers')
            except Exception as e:
                traceback.print_exc()

    def check_nodes(self):
        print('Check Nodes')
        try:
            scale = self.redis.get('cpu_auto_adjust')
            if not scale:
                return

            info = self.cli.info()
            cpus = int(info.get('NCPU', 0))
            if cpus <= 1:
                return

            total = int(float(scale) * cpus)
            self.redis.set('max_containers', total)
        except Exception as e:
            print(e)

    def add_new_client(self):
        client_id = self.redis.incr('clients')
        enc_id = base64.b64encode(os.urandom(27))
        self.redis.setex('cm:' + enc_id, self.Q_EXPIRE_TIME, client_id)
        self.redis.setex('q:' + str(client_id), self.Q_EXPIRE_TIME, 1)
        return enc_id, client_id

    def am_i_next(self, enc_id):
        client_id = None
        if enc_id:
            self.redis.expire('cm:' + enc_id, self.Q_EXPIRE_TIME)
            client_id = self.redis.get('cm:' + enc_id)

        if not client_id:
            enc_id, client_id = self.add_new_client()

        client_id = int(client_id)
        next_client = int(self.redis.get('next_client'))

        # not next client
        if client_id != next_client:
            # if this client expired, delete it from queue
            if not self.redis.get('q:' + str(next_client)):
                print('skipping expired', next_client)
                self.redis.incr('next_client')

            # missed your number somehow, get a new one!
            if client_id < next_client:
                enc_id, client_id = self.add_new_client()

        diff = client_id - next_client

        if self.throttle():
            self.redis.expire('q:' + str(client_id), self.Q_EXPIRE_TIME)
            return enc_id, client_id - next_client

        #num_containers = self.redis.hlen('all_containers')
        num_containers = int(self.redis.get('num_containers'))

        max_containers = self.redis.get('max_containers')
        max_containers = int(max_containers) if max_containers else self.MAX_CONT

        if diff <= (max_containers - num_containers):
            self.redis.incr('next_client')
            return enc_id, -1
        else:
            self.redis.expire('q:' + str(client_id), self.Q_EXPIRE_TIME)
            return enc_id, client_id - next_client

    def throttle(self):
        timings = self.redis.lrange('init_timings', 0, -1)
        if not timings:
            return False

        timings = self.redis.mget(*timings)

        avg = 0
        count = 0
        for val in timings:
            if val is not None:
                avg += float(val)
                count += 1

        if count == 0:
            return False

        avg = avg / count
        print('AVG: ', avg)

        throttle_max_avg = float(self.redis.get('throttle_max_avg'))
        if avg >= throttle_max_avg:
            print('Throttling, too slow...')
            return True

        return False

    def do_init(self, browser, url, ts, host, client_id):
        env = {}
        env['URL'] = url
        env['TS'] = ts
        env['SCREEN_WIDTH'] = os.environ.get('SCREEN_WIDTH')
        env['SCREEN_HEIGHT'] = os.environ.get('SCREEN_HEIGHT')
        env['REDIS_HOST'] = dc.REDIS_HOST
        env['PYWB_HOST_PORT'] = dc.PYWB_HOST + ':8080'
        env['BROWSER'] = browser

        info = self.timed_new_container(browser, env, host, client_id)
        info['queue'] = 0
        return info

    def get_randompage(self):
        if not self.randompages:
            return '/'

        url, ts = random.choice(self.randompages).split(' ', 1)
        print(url, ts)
        path = self.get_random_browser()
        return '/' + path + '/' + ts + '/' + url

    def get_random_browser(self):
        while True:
            id_ = random.choice(self.browser_paths.keys())
            if self.browser_paths[id_].get('skip_random'):
                continue
            return id_
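
# Illustrative sketch (not part of the original module): how a request handler
# might drive the queue/launch cycle above. `dc` is the module-level
# DockerController instance the class itself refers to; the browser path, URL
# and timestamp values are placeholders.
dc = DockerController()

def handle_init_request(enc_id):
    enc_id, queue_pos = dc.am_i_next(enc_id)
    if queue_pos < 0:
        # our turn: launch a browser container and return its endpoints
        return dc.do_init('chrome', 'http://example.com/', '20150601000000',
                          host=None, client_id=str(enc_id))
    # still waiting: report the position in the queue
    return {'queue': queue_pos, 'id': enc_id}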
def delete_instance(docker_api_endpoint, service_name, container_id):
    c = DockerClient(docker_api_endpoint)
    c.stop(container_id)
    c.remove_container(container_id)
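
# Example call (hypothetical endpoint and container id); note that service_name
# is accepted but unused by the helper above.
# delete_instance('unix://var/run/docker.sock', 'web', 'f3a1b2c4d5e6')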
# Assumed module-level imports, inferred from usage; the original snippet omits
# them. DockerClient is the project's own base class (it provides tag(),
# image() and flush_images()); its module path is not shown in this excerpt.
import json
import logging

from docker import Client


class DockerPyClient(DockerClient):

    def __init__(self, remote, username=None, password=None, email=None):
        super(DockerPyClient, self).__init__()
        self.client = Client(base_url=remote, version='1.15')
        self.log = logging.getLogger(__name__)
        self.log.debug('password %s, remote = %s, username=%s', password, remote, username)
        if username:
            self.client.login(username=username, password=password, email=email)

    def docker_images(self, filters=None):
        return self.client.images(filters=filters)

    def __id(self, ioc):
        if ioc and 'Id' in ioc:
            return ioc['Id']
        return None

    def docker_containers(self):
        return [{
            'Id': cont['Id'],
            'Tag': cont['Image'],
            'Image': self.__id(self.image(cont['Image'])),
            'Names': cont['Names'],
            'Ports': cont['Ports'],
            'Created': cont['Created'],
            'Command': cont['Command'],
            'Status': cont['Status'],
            'Running': cont['Status'].startswith('Up ') or cont['Status'].startswith('Restarting ')
        } for cont in self.client.containers(all=True)]

    def docker_pull(self, image):
        (repository, tag) = self.tag(image)
        existing = self.image(image)
        for line in self.client.pull(repository=repository, stream=True, insecure_registry=True):
            parsed = json.loads(line)
            self.log.debug('parsed %s' % parsed)
            if 'error' in parsed:
                raise Exception(parsed['error'])
        # Check if image updated
        self.flush_images()
        newer = self.image(image)
        if not existing or (newer['Id'] != existing['Id']):
            return True
        return False

    def docker_run(self, entry):
        volumes = ['/var/log/ext']
        kwargs = {
            'image': entry['image'],
            'volumes': volumes,
            'detach': True,
            'environment': {'DOCKER_IMAGE': entry['image']}
        }
        if 'name' in entry:
            kwargs['name'] = entry['name']
        if 'env' in entry:
            kwargs['environment'].update(entry['env'])
        if 'cpu' in entry:
            kwargs['cpu_shares'] = entry['cpu']
        if 'memory' in entry:
            kwargs['mem_limit'] = entry['memory']
        if 'entrypoint' in entry:
            kwargs['entrypoint'] = entry['entrypoint']
        if 'command' in entry:
            kwargs['command'] = entry['command']
        if 'volumes' in entry:
            volumes.extend([vol['containerPath'] for vol in entry['volumes'] if 'containerPath' in vol])
            volsFrom = [vol['from'] for vol in entry['volumes'] if 'from' in vol]
            if len(volsFrom):
                kwargs['volumes_from'] = volsFrom
        if 'portMappings' in entry:
            kwargs['ports'] = [p['containerPort'] for p in entry['portMappings']]

        container = self.client.create_container(**kwargs)
        self.docker_start(container['Id'], entry)
        return container['Id']

    def docker_start(self, container, entry=None):
        logsBound = False
        binds = {}
        restart_policy = 'on-failure'
        kwargs = {
            'container': container,
            'binds': binds
        }
        if entry is not None:
            if 'network' in entry:
                kwargs['network_mode'] = entry['network']
            if 'privileged' in entry:
                kwargs['privileged'] = entry['privileged']
            if 'volumes' in entry:
                volsFrom = []
                for vol in entry['volumes']:
                    if 'from' in vol:
                        volsFrom.append(vol['from'])
                        continue
                    if not 'containerPath' in vol:
                        self.log.warn('No container mount point specified, skipping volume')
                        continue
                    if not 'hostPath' in vol:
                        # Just a local volume, no bindings
                        continue
                    binds[vol['hostPath']] = {
                        'bind': vol['containerPath'],
                        'ro': 'mode' in vol and vol['mode'].lower() == 'ro'
                    }
                    if vol['containerPath'] == '/var/log/ext':
                        logsBound = True
                if len(volsFrom):
                    kwargs['volumes_from'] = volsFrom
            if 'portMappings' in entry:
                portBinds = {}
                for pm in entry['portMappings']:
                    portBinds[pm['containerPort']] = pm['hostPort'] if 'hostPort' in pm else None
                kwargs['port_bindings'] = portBinds
            if 'links' in entry:
                kwargs['links'] = entry['links']
            if 'restart' in entry:
                restart_policy = entry['restart']

        kwargs['restart_policy'] = {'MaximumRetryCount': 0, 'Name': restart_policy}

        if not logsBound:
            binds['/var/log/ext/%s' % container] = {'bind': '/var/log/ext', 'ro': False}

        self.client.start(**kwargs)

    def docker_signal(self, container, sig='HUP'):
        self.client.kill(container, sig)

    def docker_restart(self, container):
        self.client.restart(container)

    def docker_stop(self, container):
        self.client.stop(container)

    def docker_rm(self, container):
        self.client.remove_container(container)

    def docker_rmi(self, image):
        # Force removal, sometimes conflicts result from truncated pulls when
        # dockerup container upgrades/dies
        self.client.remove_image(image, force=True)
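
# Usage sketch (illustrative): launch a container from an "entry" dict in the
# shape docker_run()/docker_start() expect. It assumes the project-specific
# DockerClient base class is importable; the image name and ports below are
# placeholders.
if __name__ == '__main__':
    cli = DockerPyClient('unix://var/run/docker.sock')
    entry = {
        'image': 'nginx:latest',
        'name': 'example-nginx',
        'portMappings': [{'containerPort': 80, 'hostPort': 8080}],
        'restart': 'always',
    }
    print('started container', cli.docker_run(entry))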
class DockerController(object):
    def _load_config(self):
        with open('./config.yaml') as fh:
            config = yaml.load(fh)
        return config

    def __init__(self):
        config = self._load_config()
        self.REDIS_HOST = config['redis_host']
        self.PYWB_HOST = config['pywb_host']
        self.C_EXPIRE_TIME = config['container_expire_secs']
        self.Q_EXPIRE_TIME = config['queue_expire_secs']
        self.REMOVE_EXP_TIME = config['remove_expired_secs']
        self.VERSION = config['api_version']
        self.VNC_PORT = config['vnc_port']
        self.CMD_PORT = config['cmd_port']
        self.image_prefix = config['image_prefix']
        self.browsers = config['browsers']

        self.redis = redis.StrictRedis(host=self.REDIS_HOST)
        self.redis.setnx('next_client', '1')

        if os.path.exists('/var/run/docker.sock'):
            self.cli = Client(base_url='unix://var/run/docker.sock', version=self.VERSION)
        else:
            kwargs = kwargs_from_env()
            kwargs['tls'].assert_hostname = False
            kwargs['version'] = self.VERSION
            self.cli = Client(**kwargs)

    def new_container(self, browser, env=None):
        tag = self.browsers.get(browser)
        # get default browser
        if not tag:
            tag = self.browsers['']

        container = self.cli.create_container(image=self.image_prefix + '/' + tag,
                                              ports=[self.VNC_PORT, self.CMD_PORT],
                                              environment=env,
                                              )
        id_ = container.get('Id')
        res = self.cli.start(container=id_,
                             port_bindings={self.VNC_PORT: None, self.CMD_PORT: None},
                             links={self.PYWB_HOST: self.PYWB_HOST, self.REDIS_HOST: self.REDIS_HOST},
                             volumes_from=['netcapsule_shared_data_1'],
                             )
        vnc_port = self.cli.port(id_, self.VNC_PORT)
        vnc_port = vnc_port[0]['HostPort']

        cmd_port = self.cli.port(id_, self.CMD_PORT)
        cmd_port = cmd_port[0]['HostPort']

        info = self.cli.inspect_container(id_)
        ip = info['NetworkSettings']['IPAddress']
        short_id = id_[:12]

        self.redis.hset('all_containers', short_id, ip)
        self.redis.setex('c:' + short_id, self.C_EXPIRE_TIME, 1)

        return vnc_port, cmd_port

    def remove_container(self, short_id, ip):
        print('REMOVING ' + short_id)
        try:
            self.cli.remove_container(short_id, force=True)
        except Exception as e:
            print(e)

        self.redis.hdel('all_containers', short_id)
        self.redis.delete('c:' + short_id)

        ip_keys = self.redis.keys(ip + ':*')
        for key in ip_keys:
            self.redis.delete(key)

    def remove_all(self, check_expired=False):
        all_containers = self.redis.hgetall('all_containers')
        for short_id, ip in all_containers.iteritems():
            if check_expired:
                remove = not self.redis.get('c:' + short_id)
            else:
                remove = True
            if remove:
                self.remove_container(short_id, ip)

    def add_new_client(self):
        #client_id = base64.b64encode(os.urandom(27))
        #self.redis.rpush('q:clients', client_id)
        client_id = self.redis.incr('clients')
        self.redis.setex('q:' + str(client_id), self.Q_EXPIRE_TIME, 1)
        return client_id

    def am_i_next(self, client_id):
        next_client = int(self.redis.get('next_client'))

        # not next client
        if next_client != client_id:
            # if this client expired, delete it from queue
            if not self.redis.get('q:' + str(next_client)):
                print('skipping expired', next_client)
                self.redis.incr('next_client')

            # missed your number somehow, get a new one!
            if client_id < next_client:
                client_id = self.add_new_client()
            else:
                self.redis.expire('q:' + str(client_id), self.Q_EXPIRE_TIME)

            return client_id, client_id - next_client

        # not avail yet
        num_containers = self.redis.hlen('all_containers')
        if num_containers >= MAX_CONT:
            self.redis.expire('q:' + str(client_id), self.Q_EXPIRE_TIME)
            return client_id, client_id - next_client

        self.redis.incr('next_client')
        return client_id, -1
class DockerController(object):
    def _load_config(self):
        with open('./config.yaml') as fh:
            config = yaml.load(fh)
        return config

    def __init__(self):
        config = self._load_config()
        self.REDIS_HOST = config['redis_host']
        self.PYWB_HOST = config['pywb_host']
        self.EXPIRE_TIME = config['expire_secs']
        self.REMOVE_EXP_TIME = config['remove_expired_secs']
        self.VERSION = config['api_version']
        self.VNC_PORT = config['vnc_port']
        self.CMD_PORT = config['cmd_port']
        self.image_prefix = config['image_prefix']
        self.browsers = config['browsers']

        self.redis = redis.StrictRedis(host=self.REDIS_HOST)

        if os.path.exists('/var/run/docker.sock'):
            self.cli = Client(base_url='unix://var/run/docker.sock', version=self.VERSION)
        else:
            kwargs = kwargs_from_env()
            kwargs['tls'].assert_hostname = False
            kwargs['version'] = self.VERSION
            self.cli = Client(**kwargs)

    def new_container(self, browser, env=None):
        tag = self.browsers.get(browser)
        # get default browser
        if not tag:
            tag = self.browsers['']

        container = self.cli.create_container(image=self.image_prefix + '/' + tag,
                                              ports=[self.VNC_PORT, self.CMD_PORT],
                                              environment=env)
        id_ = container.get('Id')
        res = self.cli.start(container=id_,
                             port_bindings={self.VNC_PORT: None, self.CMD_PORT: None},
                             links={self.PYWB_HOST: self.PYWB_HOST, self.REDIS_HOST: self.REDIS_HOST})

        vnc_port = self.cli.port(id_, self.VNC_PORT)
        vnc_port = vnc_port[0]['HostPort']

        cmd_port = self.cli.port(id_, self.CMD_PORT)
        cmd_port = cmd_port[0]['HostPort']

        info = self.cli.inspect_container(id_)
        ip = info['NetworkSettings']['IPAddress']
        short_id = id_[:12]

        self.redis.hset('all_containers', short_id, ip)
        self.redis.setex('c:' + short_id, self.EXPIRE_TIME, 1)

        return vnc_port, cmd_port

    def remove_container(self, short_id, ip):
        print('REMOVING ' + short_id)
        try:
            self.cli.remove_container(short_id, force=True)
        except Exception as e:
            print(e)

        self.redis.hdel('all_containers', short_id)
        self.redis.delete('c:' + short_id)

        ip_keys = self.redis.keys(ip + ':*')
        for key in ip_keys:
            self.redis.delete(key)

    def remove_all(self, check_expired=False):
        all_containers = self.redis.hgetall('all_containers')
        for short_id, ip in all_containers.iteritems():
            if check_expired:
                remove = not self.redis.get('c:' + short_id)
            else:
                remove = True
            if remove:
                self.remove_container(short_id, ip)
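
# Illustrative sketch (not part of the original module): launching a browser
# container with the simple controller above and printing the mapped ports.
# The empty browser key selects the configured default image; the env value is
# a placeholder.
if __name__ == '__main__':
    dc = DockerController()
    vnc_port, cmd_port = dc.new_container('', env={'URL': 'http://example.com/'})
    print('vnc on', vnc_port, 'cmd on', cmd_port)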
class DockerController(object): def _load_config(self): config = os.environ.get('BROWSER_CONFIG', './config.yaml') with open(config) as fh: config = yaml.load(fh) config = config['browser_config'] for n, v in config.items(): new_v = os.environ.get(n) if not new_v: new_v = os.environ.get(n.upper()) if new_v: print('Setting Env Val: {0}={1}'.format(n, new_v)) config[n] = new_v return config def __init__(self): config = self._load_config() self.name = config['cluster_name'] self.label_name = config['label_name'] self.init_req_expire_secs = config['init_req_expire_secs'] self.queue_expire_secs = config['queue_expire_secs'] self.remove_expired_secs = config['remove_expired_secs'] self.api_version = config['api_version'] self.ports = config['ports'] self.port_bindings = dict((port, None) for port in self.ports.values()) self.max_containers = config['max_containers'] self.throttle_expire_secs = config['throttle_expire_secs'] self.browser_image_prefix = config['browser_image_prefix'] self.label_browser = config['label_browser'] self.label_prefix = config['label_prefix'] self.network_name = config['network_name'] self.volume_source = config['browser_volumes'] self.shm_size = config['shm_size'] self.default_browser = config['default_browser'] self._init_cli() while True: try: self._init_redis(config) break except BusyLoadingError: print('Waiting for Redis to Load...') time.sleep(5) def _init_cli(self): if os.path.exists('/var/run/docker.sock'): self.cli = Client(base_url='unix://var/run/docker.sock', version=self.api_version) else: kwargs = kwargs_from_env(assert_hostname=False) kwargs['version'] = self.api_version self.cli = Client(**kwargs) def _init_redis(self, config): redis_url = os.environ['REDIS_BROWSER_URL'] self.redis = redis.StrictRedis.from_url(redis_url, decode_responses=True) self.redis.setnx('next_client', '1') self.redis.setnx('max_containers', self.max_containers) self.redis.setnx('num_containers', '0') # TODO: support this #self.redis.set('cpu_auto_adjust', config['cpu_auto_adjust']) # if num_containers is invalid, reset to 0 try: assert (int(self.redis.get('num_containers') >= 0)) except: self.redis.set('num_containers', 0) self.redis.set('throttle_samples', config['throttle_samples']) self.redis.set('throttle_max_avg', config['throttle_max_avg']) self.duration = int(config['container_expire_secs']) self.redis.set('container_expire_secs', self.duration) def load_avail_browsers(self, params=None): filters = {"dangling": False} if params: all_filters = [] for k, v in params.items(): if k not in ('short'): all_filters.append(self.label_prefix + k + '=' + v) filters["label"] = all_filters else: filters["label"] = self.label_browser browsers = {} try: images = self.cli.images(filters=filters) for image in images: tags = image.get('RepoTags') id_ = self._get_primary_id(tags) if not id_: continue props = self._browser_info(image['Labels']) props['id'] = id_ browsers[id_] = props except: traceback.print_exc() return browsers def _get_primary_id(self, tags): if not tags: return None primary_tag = None for tag in tags: if not tag: continue if tag.endswith(':latest'): tag = tag.replace(':latest', '') if not tag.startswith(self.browser_image_prefix): continue # pick the longest tag as primary tag if not primary_tag or len(tag) > len(primary_tag): primary_tag = tag if primary_tag: return primary_tag[len(self.browser_image_prefix):] else: return None def get_browser_info(self, name, include_icon=False): tag = self.browser_image_prefix + name try: image = self.cli.inspect_image(tag) tags = 
    def _browser_info(self, labels, include_icon=False):
        props = {}
        caps = []

        for n, v in labels.items():
            wr_prop = n.split(self.label_prefix)
            if len(wr_prop) != 2:
                continue

            name = wr_prop[1]

            if not include_icon and name == 'icon':
                continue

            props[name] = v

            if name.startswith('caps.'):
                caps.append(name.split('.', 1)[1])

        props['caps'] = ', '.join(caps)

        return props

    def _get_host_port(self, info, port, default_host):
        info = info['NetworkSettings']['Ports'][str(port) + '/tcp']
        info = info[0]
        host = info['HostIp']
        if host == '0.0.0.0' and default_host:
            host = default_host

        return host + ':' + info['HostPort']

    def _get_port(self, info, port):
        info = info['NetworkSettings']['Ports'][str(port) + '/tcp']
        info = info[0]
        return info['HostPort']

    def sid(self, id):
        return id[:12]

    def timed_new_container(self, browser, env, host, reqid):
        start = time.time()
        info = self.new_container(browser, env, host)
        end = time.time()

        dur = end - start
        time_key = 't:' + reqid
        self.redis.setex(time_key, self.throttle_expire_secs, dur)

        throttle_samples = int(self.redis.get('throttle_samples'))
        print('INIT DUR: ' + str(dur))
        self.redis.lpush('init_timings', time_key)
        self.redis.ltrim('init_timings', 0, throttle_samples - 1)

        return info

    def new_container(self, browser_id, env=None, default_host=None):
        if env is None:
            env = {}

        browser = self.get_browser_info(browser_id)

        # fall back to the default browser if the requested one is unknown
        if not browser:
            browser = self.get_browser_info(self.default_browser)

        if browser.get('req_width'):
            env['SCREEN_WIDTH'] = browser.get('req_width')

        if browser.get('req_height'):
            env['SCREEN_HEIGHT'] = browser.get('req_height')

        image = browser['tags'][0]

        print('Launching ' + image)

        short_id = None

        try:
            host_config = self.create_host_config()

            container = self.cli.create_container(
                image=image,
                ports=list(self.ports.values()),
                environment=env,
                host_config=host_config,
                labels={self.label_name: self.name},
            )

            id_ = container.get('Id')
            short_id = self.sid(id_)

            res = self.cli.start(container=id_)

            info = self.cli.inspect_container(id_)
            ip = info['NetworkSettings']['IPAddress']
            if not ip:
                ip = info['NetworkSettings']['Networks'][self.network_name]['IPAddress']

            self.redis.hset('all_containers', short_id, ip)

            result = {}
            for port_name in self.ports:
                result[port_name + '_host'] = self._get_host_port(
                    info, self.ports[port_name], default_host)

            result['id'] = short_id
            result['ip'] = ip
            result['audio'] = os.environ.get('AUDIO_TYPE', '')

            return result

        except Exception as e:
            traceback.print_exc()
            if short_id:
                print('EXCEPTION: ' + short_id)
                self.remove_container(short_id)

            return {}

    def create_host_config(self):
        if self.volume_source:
            volumes_from = [self.volume_source]
        else:
            volumes_from = None

        host_config = self.cli.create_host_config(
            port_bindings=self.port_bindings,
            volumes_from=volumes_from,
            network_mode=self.network_name,
            shm_size=self.shm_size,
            cap_add=['ALL'],
            security_opt=['apparmor=unconfined'],
        )
        return host_config

    def remove_container(self, short_id):
        print('REMOVING: ' + short_id)
        try:
            self.cli.remove_container(short_id, force=True)
        except Exception as e:
            print(e)

        reqid = None
        ip = self.redis.hget('all_containers', short_id)
        if ip:
            reqid = self.redis.hget('ip:' + ip, 'reqid')

        with redis.utils.pipeline(self.redis) as pi:
            pi.delete('ct:' + short_id)

            if not ip:
                return

            pi.hdel('all_containers', short_id)
            pi.delete('ip:' + ip)

            if reqid:
                pi.delete('req:' + reqid)
    def event_loop(self):
        for event in self.cli.events(decode=True):
            try:
                self.handle_docker_event(event)
            except Exception as e:
                print(e)

    def handle_docker_event(self, event):
        if event['Type'] != 'container':
            return

        if (event['status'] == 'die' and
                event['from'].startswith(self.browser_image_prefix) and
                event['Actor']['Attributes'].get(self.label_name) == self.name):

            short_id = self.sid(event['id'])
            print('EXITED: ' + short_id)

            self.remove_container(short_id)
            self.redis.decr('num_containers')
            return

        if (event['status'] == 'start' and
                event['from'].startswith(self.browser_image_prefix) and
                event['Actor']['Attributes'].get(self.label_name) == self.name):

            short_id = self.sid(event['id'])
            print('STARTED: ' + short_id)

            self.redis.incr('num_containers')
            self.redis.setex('ct:' + short_id, self.duration, 1)
            return

    def remove_expired_loop(self):
        while True:
            try:
                self.remove_expired()
            except Exception as e:
                print(e)

            time.sleep(self.remove_expired_secs)

    def remove_expired(self):
        all_known_ids = self.redis.hkeys('all_containers')

        all_containers = {self.sid(c['Id']) for c in self.cli.containers(quiet=True)}

        for short_id in all_known_ids:
            if not self.redis.get('ct:' + short_id):
                print('TIME EXPIRED: ' + short_id)
                self.remove_container(short_id)
            elif short_id not in all_containers:
                print('STALE ID: ' + short_id)
                self.remove_container(short_id)

    def auto_adjust_max(self):
        print('Auto-Adjust Max Loop')
        try:
            scale = self.redis.get('cpu_auto_adjust')
            if not scale:
                return

            info = self.cli.info()
            cpus = int(info.get('NCPU', 0))
            if cpus <= 1:
                return

            total = int(float(scale) * cpus)
            self.redis.set('max_containers', total)

        except Exception as e:
            traceback.print_exc()

    def add_new_client(self, reqid):
        client_id = self.redis.incr('clients')
        #enc_id = base64.b64encode(os.urandom(27)).decode('utf-8')
        self.redis.setex('cm:' + reqid, self.queue_expire_secs, client_id)
        self.redis.setex('q:' + str(client_id), self.queue_expire_secs, 1)
        return client_id

    def _make_reqid(self):
        return base64.b32encode(os.urandom(15)).decode('utf-8')

    def _make_vnc_pass(self):
        return base64.b64encode(os.urandom(21)).decode('utf-8')

    def register_request(self, container_data):
        reqid = self._make_reqid()
        container_data['reqid'] = reqid

        self.redis.hmset('req:' + reqid, container_data)
        self.redis.expire('req:' + reqid, self.init_req_expire_secs)
        return reqid
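    # Note (added): summary of the Redis keys used by the queueing and
    # throttling logic below, as inferred from this class:
    #   next_client             - queue position currently allowed to launch
    #   clients                 - counter handing out queue positions
    #   cm:<reqid>              - maps a request id to its queue position
    #   q:<client_id>           - keep-alive key for a waiting queue slot
    #   num_containers          - number of running browser containers
    #   max_containers          - capacity limit (may be auto-adjusted)
    #   init_timings, t:<reqid> - recent launch durations used by throttle()
    #   req:<reqid>             - pending request data, later the session info
    #   ip:<ip>                 - request data for a running container
    #   ct:<short_id>           - expiring key marking a live container
    #   all_containers          - hash of short container id -> container ip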
    def am_i_next(self, reqid):
        client_id = self.redis.get('cm:' + reqid)
        if not client_id:
            client_id = self.add_new_client(reqid)
        else:
            self.redis.expire('cm:' + reqid, self.queue_expire_secs)

        client_id = int(client_id)
        next_client = int(self.redis.get('next_client'))

        # not next client
        if client_id != next_client:
            # if this client expired, delete it from queue
            if not self.redis.get('q:' + str(next_client)):
                print('skipping expired', next_client)
                self.redis.incr('next_client')

            # missed your number somehow, get a new one!
            if client_id < next_client:
                client_id = self.add_new_client(reqid)

        diff = client_id - next_client

        if self.throttle():
            self.redis.expire('q:' + str(client_id), self.queue_expire_secs)
            return client_id - next_client

        #num_containers = self.redis.hlen('all_containers')
        num_containers = int(self.redis.get('num_containers'))

        max_containers = self.redis.get('max_containers')
        max_containers = int(max_containers) if max_containers else self.max_containers

        if diff <= (max_containers - num_containers):
            self.redis.incr('next_client')
            return -1
        else:
            self.redis.expire('q:' + str(client_id), self.queue_expire_secs)
            return client_id - next_client

    def throttle(self):
        timings = self.redis.lrange('init_timings', 0, -1)
        if not timings:
            return False

        timings = self.redis.mget(*timings)

        avg = 0
        count = 0
        for val in timings:
            if val is not None:
                avg += float(val)
                count += 1

        if count == 0:
            return False

        avg = avg / count

        print('AVG: ', avg)
        throttle_max_avg = float(self.redis.get('throttle_max_avg'))
        if avg >= throttle_max_avg:
            print('Throttling, too slow...')
            return True

        return False

    def _copy_env(self, env, name, override=None):
        env[name] = override or os.environ.get(name)

    def init_new_browser(self, reqid, host, width=None, height=None):
        req_key = 'req:' + reqid

        container_data = self.redis.hgetall(req_key)

        if not container_data:
            return None

        # already started, attempt to reconnect
        if 'queue' in container_data:
            container_data['ttl'] = self.redis.ttl('ct:' + container_data['id'])
            return container_data

        queue_pos = self.am_i_next(reqid)

        if queue_pos >= 0:
            return {'queue': queue_pos}

        browser = container_data['browser']
        url = container_data.get('url', 'about:blank')
        ts = container_data.get('request_ts')

        env = {}

        env['URL'] = url
        env['TS'] = ts
        env['BROWSER'] = browser

        vnc_pass = self._make_vnc_pass()
        env['VNC_PASS'] = vnc_pass

        self._copy_env(env, 'PROXY_HOST')
        self._copy_env(env, 'PROXY_PORT')
        self._copy_env(env, 'PROXY_GET_CA')
        self._copy_env(env, 'SCREEN_WIDTH', width)
        self._copy_env(env, 'SCREEN_HEIGHT', height)
        self._copy_env(env, 'IDLE_TIMEOUT')
        self._copy_env(env, 'AUDIO_TYPE')

        info = self.timed_new_container(browser, env, host, reqid)
        info['queue'] = 0
        info['vnc_pass'] = vnc_pass

        new_key = 'ip:' + info['ip']

        # TODO: support different durations?
        self.duration = int(self.redis.get('container_expire_secs'))

        with redis.utils.pipeline(self.redis) as pi:
            pi.rename(req_key, new_key)
            pi.persist(new_key)

            pi.hmset(req_key, info)
            pi.expire(req_key, self.duration)

        info['ttl'] = self.duration
        return info

    def clone_browser(self, reqid, id_, name):
        short_id = self.redis.hget('req:' + reqid, 'id')

        #try:
        #    container = self.cli.containers.get(short_id)
        #except Exception as e:
        #    print(e)
        #    print('Container Not Found: ' + short_id)
        #    return {'error': str(e)}

        env = {}
        self._copy_env(env, 'PROXY_HOST')
        self._copy_env(env, 'PROXY_PORT')
        self._copy_env(env, 'PROXY_GET_CA')
        self._copy_env(env, 'SCREEN_WIDTH')
        self._copy_env(env, 'SCREEN_HEIGHT')
        self._copy_env(env, 'IDLE_TIMEOUT')
        self._copy_env(env, 'AUDIO_TYPE')

        env_list = []
        for n, v in env.items():
            if n and v:
                env_list.append(n + '=' + v)

        config = {'Env': env_list,
                  'Labels': {'wr.name': name}}

        try:
            # ask the running browser to shut down cleanly before committing
            exec_id = self.cli.exec_create(container=short_id,
                                           cmd="bash -c 'kill $(cat /tmp/browser_pid)'")
            self.cli.exec_start(exec_id=exec_id['Id'], detach=False, tty=False)
            time.sleep(0.5)
        except Exception as e:
            print(e)

        try:
            res = self.cli.commit(container=short_id,
                                  repository='oldwebtoday/user/' + id_,
                                  conf=config)
            return {'success': '1'}
        except Exception as e:
            print(e)
            return {'error': str(e)}

    def get_random_browser(self):
        browsers = self.load_avail_browsers()
        while True:
            id_ = random.choice(list(browsers.keys()))
            if browsers[id_].get('skip_random'):
                continue

            return id_
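# --- Added usage sketch (not part of the original module) ---
# Rough end-to-end flow for the queued controller: register a request, then
# poll init_new_browser() until a container slot is granted. REDIS_BROWSER_URL,
# the browser config file, a reachable Docker daemon, and the 'chrome:60'
# browser id are assumed example values, not part of the original code.
def _demo_queued_launch():
    controller = DockerController()

    reqid = controller.register_request({'browser': 'chrome:60',
                                         'url': 'http://example.com/',
                                         'request_ts': ''})

    while True:
        info = controller.init_new_browser(reqid, host='localhost')
        if info is None:
            raise RuntimeError('request expired before a container was granted')
        if 'id' in info:
            # contains host:port endpoints, ip, vnc_pass and ttl
            return info
        # still queued; info['queue'] is the current position
        time.sleep(1)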