Example #1
def list_containers(args):
    """
    - API to list all running containers.
    - API is equivalent to the `docker ps` command.

    - Response is a list of dictionaries:
    [{u'Status': u'Up 25 minutes', u'Created': 1477874345, u'Image':
     u'sha256:2b786d1d393fca95d9baa72c40b7d2da8b4fc3135659b7ca3046967f8de09c15',
     u'Labels': {}, u'NetworkSettings': {u'Networks': {u'bridge': {u'NetworkID':
     u'f3211da5394d90c58365fb9f50285480735171ef88a4f21399ec08575797f21f',
     u'MacAddress': u'02:42:ac:11:00:02', u'GlobalIPv6PrefixLen': 0, u'Links': None,
     u'GlobalIPv6Address': u'', u'IPv6Gateway': u'', u'IPAMConfig': None,
     u'EndpointID': u'932507e69777dc1c9bd5784941d92722889a9d76af1a10672afb1c063c092398',
     u'IPPrefixLen': 16, u'IPAddress': u'172.17.0.2', u'Gateway': u'172.17.0.1',
     u'Aliases': None}}}, u'HostConfig': {u'NetworkMode': u'default'}, u'ImageID':
     u'sha256:2b786d1d393fca95d9baa72c40b7d2da8b4fc3135659b7ca3046967f8de09c15',
     u'State': u'running', u'Command': u'/bin/bash', u'Names': [u'/nostalgic_bhabha'],
     u'Mounts': [], u'Id': u'2b5c2b8de6610c2d443518a84ca7c56ded98fbcf9a70c02ea73746b5c05dd21e',
     u'Ports': []}]

    """

    if 'all' in args:
        args['all'] = bool(args['all'])
    invoke_clientAPI = APIClient(base_url='unix://var/run/docker.sock',
                                 version='auto')
    container_list = invoke_clientAPI.containers(**args)
    return container_list
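A minimal usage sketch, assuming the daemon listens on the default Unix socket; the printed keys are the ones shown in the sample response above:

# Hypothetical call: include stopped containers, like `docker ps -a`.
for c in list_containers({'all': 1}):
    print(c['Id'][:12], c['State'], c['Names'][0])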
Example #2
def get_server_containers(server: Server, client: docker.APIClient) -> tp.List[dict]:
    containers = client.containers()
    server_containers = []
    for container in containers:
        server_container = {
            'command': filter_printable(container['Command']),
            'containerId': container['Id'],
            'image': container['Image'],
            'labels': sorted([{
                'containerId': container['Id'],
                'name': l[0],
                'value': l[1]} for l in container['Labels'].items()],
                key=lambda label: label['name']),
            'name': container['Names'][0],
            'network': container['HostConfig']['NetworkMode'],
            'ports': sorted([{
                'destination': str(p['PrivatePort']),
                'hostIp': p['IP'] if 'IP' in p else None,
                'protocol': p['Type'],
                'source': str(p['PublicPort']) if 'PublicPort' in p else None} for p in container['Ports']],
                key=lambda port: (str(port['destination']), str(port['source']))),
            'privileged': client.inspect_container(container['Id'])['HostConfig']['Privileged'],
            'serverId': server.id,
            'volumes': sorted([{
                'containerId': container['Id'],
                'destination': filter_printable(v['Destination']),
                'source': filter_printable(v['Source'])} for v in container['Mounts']],
                key=lambda volume: volume['destination'])
        }
        server_containers.append(server_container)
    return server_containers
Example #3
def logs_all():
	cli = APIClient()
	for container in cli.containers(filters={"status": "running"}):
		thread = threading.Thread(target=logs, args=[container])
		thread.daemon = True
		thread.start()
	while True:
		time.sleep(1)
Example #4
 def test_stop_ethereum(self):
     container = main.fresh_start_ethereum(self.config)
     self.assertIsNotNone(container)
     main.stop_ethereum()
     running_containers = []
     cli = APIClient()
     for container in cli.containers(filters={"status": "running"}):
         running_containers.append(container)
     self.assertEqual(len(running_containers), 0)
Example #5
def restart_ethereum():
	containers = []
	cli = APIClient()
	for container in cli.containers(filters={"name":"eth","status":"running"}):
		containers.append(container)
		cli.restart(container)
	return len(containers) > 0
Example #6
def modify_random_containers(client: docker.APIClient, amount: int, action: str = 'stop') -> tp.List[dict]:
    server_containers = client.containers()
    stopped_containers = []
    for _ in range(amount):
        container = random.choice(server_containers)
        if action == 'delete':
            client.remove_container(container, force=True)
        elif action == 'stop':
            client.stop(container)
            stopped_containers.append(container)
        server_containers.remove(container)
    return stopped_containers
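A hedged follow-up sketch: the dicts returned by `containers()` carry an 'Id' key, so the stopped containers can be started again afterwards:

# Hypothetical round trip: stop two random containers, then restart them.
stopped = modify_random_containers(client, amount=2, action='stop')
for c in stopped:
    client.start(c['Id'])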
Example #7
 def tearDown(self):
     for filename in self.files:
         try:
             os.remove(filename)
         except OSError:
             pass
     with open(".env.example") as base_env:
         self.env = base_env.read()
     with open(".env", "w") as new_env:
         new_env.write(self.env)
     cli = APIClient()
     for container in cli.containers(filters={"status": "running"}):
         cli.stop(container)
         cli.remove_container(container)
Example #8
def update_chainlink():
	pull("chainlink")
	used_ports = []
	containers = []
	cli = APIClient()
	for container in cli.containers(filters={"ancestor":"smartcontract/chainlink","status":"running"}):
		used_ports.append(container["Ports"][1]["PublicPort"])
		containers.append(container)
	if len(used_ports) > 0:
		new_container = start_chainlink(sorted(used_ports)[-1] + 1)
		for container in containers:
			cli.kill(container["Id"])
	else:
		new_container = start_chainlink(6689)
	sys.stdout.write(new_container.container.name + "\n")
Example #9
 def test_stop_chainlink(self):
     self.config.write_config()
     main.generate_certs()
     main.input = lambda _: "*****@*****.**"
     main.create_api_email()
     main.input = lambda _: "password"
     main.create_api_password()
     main.create_wallet_password()
     container = main.start_chainlink(6689)
     self.assertEqual(container.ports["6689/tcp"], 6689)
     self.assertIsNotNone(container)
     main.stop_chainlink()
     running_containers = []
     cli = APIClient()
     for container in cli.containers(filters={"status": "running"}):
         running_containers.append(container)
     self.assertEqual(len(running_containers), 0)
Example #10
def containers_factory(d: docker.APIClient) -> typing.List[ContainerStats]:
    result = []

    for c in d.containers():
        if c["State"] == "running":
            info = d.inspect_container(c["Id"])
            labels = info["Config"]["Labels"]

            try:
                label = labels["plot.label"]
                color = labels["plot.color"]
            except KeyError:
                pass
            else:
                result.append(ContainerStats(label, c["Id"], color))

    return result
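A minimal wiring sketch, assuming `ContainerStats` is defined alongside the factory and a local daemon socket:

import docker

# Hypothetical driver: collect a stats handle for every labeled container.
client = docker.APIClient(base_url='unix://var/run/docker.sock')
for stats in containers_factory(client):
    print(stats)  # one entry per running container carrying plot.label and plot.color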
Example #11
def start_containers(worker_api, name_prefix, timeout=5):
    """Start containers with given prefix

    The chaincode container usually has name with `name_prefix-` as prefix

    :param worker_api: Docker daemon url
    :param name_prefix: image name prefix
    :param timeout: Time to wait for the response
    :return: None
    """
    logger.debug("Get containers, worker_api={}, prefix={}".format(
        worker_api, name_prefix))
    client = Client(base_url=worker_api, version="auto", timeout=timeout)
    containers = client.containers(all=True)
    id_cc = [e['Id'] for e in containers if
             e['Names'][0].split("/")[-1].startswith(name_prefix)]
    logger.info(id_cc)
    for _ in id_cc:
        client.start(_)
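A hypothetical invocation; the TCP endpoint is illustrative, and the prefix matches how the chaincode containers were named:

# Start every container whose name begins with "cluster1-".
start_containers("tcp://192.168.1.10:2375", "cluster1-")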
Example #13
def _clean_project_containers(worker_api, name_prefix, timeout=5):
    """
    Clean cluster node containers and chaincode containers

    All containers with the name prefix will be removed.

    :param worker_api: Docker daemon url
    :param name_prefix: image name prefix
    :param timeout: Time to wait for the response
    :return: None
    """
    logger.debug("Clean project containers, worker_api={}, prefix={}".format(
        worker_api, name_prefix))
    client = Client(base_url=worker_api, version="auto", timeout=timeout)
    containers = client.containers(all=True)
    id_removes = [e['Id'] for e in containers if
                  e['Names'][0].split("/")[-1].startswith(name_prefix)]
    for _ in id_removes:
        client.remove_container(_, force=True)
        logger.debug("Remove container {}".format(_))
Example #14
def _clean_exited_containers(worker_api):
    """ Clean those containers with exited status

    This is dangerous, as it may delete temporary containers.
    Only trigger this when no one else uses the system.

    :param worker_api: Docker daemon url
    :return: None
    """
    logger.debug("Clean exited containers")
    client = Client(base_url=worker_api, version="auto")
    containers = client.containers(quiet=True, all=True,
                                   filters={"status": "exited"})
    id_removes = [e['Id'] for e in containers]
    for _ in id_removes:
        logger.debug("exited container to remove, id={}", _)
        try:
            client.remove_container(_)
        except Exception as e:
            logger.error("Exception in clean_exited_containers {}".format(e))
Example #17
def reset_container_host(host_type, worker_api, timeout=15):
    """ Try to detect the daemon type

    Only wait for timeout seconds.

    :param host_type: Type of host: single or swarm
    :param worker_api: Docker daemon url
    :param timeout: Time to wait for the response
    :return: host type info
    """
    try:
        client = Client(base_url=worker_api, version="auto", timeout=timeout)
        containers = client.containers(quiet=True, all=True)
        logger.debug(containers)
        for c in containers:
            client.remove_container(c['Id'], force=True)
        logger.debug("cleaning all containers")
    except Exception as e:
        logger.error("Exception happens when reset host!")
        logger.error(e)
        return False
    try:
        images = client.images(all=True)
        logger.debug(images)
        for i in images:
            if i["RepoTags"][0] == "<none>:<none>":
                logger.debug(i)
                try:
                    client.remove_image(i['Id'])
                except Exception as e:
                    logger.error(e)
                    continue
        logger.debug("cleaning <none> images")
    except Exception as e:
        logger.error("Exception happens when reset host!")
        logger.error(e)
        return False

    return setup_container_host(host_type=host_type, worker_api=worker_api)
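A hedged usage sketch; "single" is one of the host types named in the docstring, and the socket URL is illustrative:

# Remove all containers and dangling images, then re-run host setup.
ok = reset_container_host("single", "unix:///var/run/docker.sock")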
Example #18
class DockerHelper:
    def __init__(self, config):
        super().__init__()
        self.__padlock = threading.Lock()
        self.__check_in_progress = False
        self.__config = config
        self.__client = APIClient(base_url=config.docker_socket, timeout=config.docker_req_timeout_sec)
        self.__params_cache = {}
        self.last_check_containers_run_end_timestamp = datetime.datetime.min
        self.last_check_containers_run_start_timestamp = datetime.datetime.min
        self.last_check_containers_run_time = datetime.timedelta.min
        self.last_periodic_run_ok = False

    def check_container(self, container_id, remove_from_cache=False):
        try:
            if remove_from_cache:
                self.remove_from_cache(container_id)

            if not self.__config.disable_params:
                params = self.get_params(container_id)
            else:
                params = {}
            if not self.__config.disable_metrics:
                logger.debug("[{0}] Starting to fetch metrics for {1}".format(threading.current_thread().name,
                                                                              container_id))
                metrics = self.__client.stats(container=container_id, decode=True, stream=False)
            else:
                metrics = {}
            logger.debug("[{0}] Fetched data for container {1}".format(threading.current_thread().name, container_id))
        except NotFound as e:
            logger.warning("Container {0} not found - {1}.".format(container_id, e))
            return None
        except (ReadTimeout, ProtocolError, JSONDecodeError) as e:
            logger.error("Communication error when fetching info about container {0}: {1}".format(container_id, e))
            return None
        except Exception as e:
            logger.error("Unexpected error when fetching info about container {0}: {1}".format(container_id, e))
            return None
        return Container(container_id, params, metrics, 0)

    def check_containers(self):
        with self.__padlock:
            if self.__check_in_progress:
                logger.warning("[{0}] Previous check did not yet complete, consider increasing CHECK_INTERVAL_S"
                               .format(threading.current_thread().name))
                return
            self.__check_in_progress = True
        logger.debug("Periodic check start: connecting to get the list of containers")
        self.last_check_containers_run_start_timestamp = datetime.datetime.utcnow()
        try:
            containers = self.__client.containers(quiet=True)
            logger.debug("[{0}] Fetched containers list from docker daemon".format(threading.current_thread().name))
        except (ReadTimeout, ProtocolError, JSONDecodeError) as e:
            logger.error("Timeout while trying to get list of containers from docker: {0}".format(e))
            with self.__padlock:
                self.__check_in_progress = False
            self.last_periodic_run_ok = False
            return
        except Exception as e:
            logger.error("Unexpected error while trying to get list of containers from docker: {0}".format(e))
            with self.__padlock:
                self.__check_in_progress = False
            self.last_periodic_run_ok = False
            return
        ids = [container['Id'] for container in containers]
        for container_id in ids:
            container = self.check_container(container_id)
            if container is None:
                continue
            yield container
        logger.debug("Containers checked")
        if self.__config.cache_params:
            logger.debug("Purging cache")
            self.purge_cache(ids)
        self.last_periodic_run_ok = True
        self.last_check_containers_run_end_timestamp = datetime.datetime.utcnow()
        self.last_check_containers_run_time = self.last_check_containers_run_end_timestamp \
            - self.last_check_containers_run_start_timestamp
        logger.debug("Periodic check done")
        with self.__padlock:
            self.__check_in_progress = False

    def get_params(self, container_id):
        if self.__config.cache_params and container_id in self.__params_cache:
            logger.debug("Returning cached params for container {0}".format(container_id))
            return self.__params_cache[container_id]

        logger.debug("[{0}] Starting to fetch params for {1}".format(threading.current_thread().name, container_id))
        try:
            params = self.__client.inspect_container(container_id)
        except NotFound as e:
            logger.warning("Container {0} not found - {1}.".format(container_id, e))
            return None
        except (ReadTimeout, ProtocolError, JSONDecodeError) as e:
            logger.error("Communication error when fetching params for container {0}: {1}".format(container_id, e))
            return {}
        except Exception as e:
            logger.error("Unexpected error when fetching params for container {0}: {1}".format(container_id, e))
            return {}
        logger.debug("[{0}] Params fetched for {1}".format(threading.current_thread().name, container_id))
        if not self.__config.cache_params:
            return params

        logger.debug("[{0}] Storing params of {1} in cache".format(threading.current_thread().name, container_id))
        self.__params_cache[container_id] = params
        return params

    def purge_cache(self, running_container_ids):
        diff = [c for c in self.__params_cache.keys() if c not in running_container_ids]
        for cid in diff:
            self.__params_cache.pop(cid, None)

    def remove_from_cache(self, container_id):
        self.__params_cache.pop(container_id, None)

    def get_events_observable(self):
        successful = False
        ev = None
        while not successful:
            try:
                ev = self.__client.events(decode=True)
                successful = True
            except (ReadTimeout, ProtocolError, JSONDecodeError) as e:
                logger.error("Communication error when subscribing for container events, retrying in 5s: {0}".format(e))
                time.sleep(5)
            except Exception as e:
                logger.error("Unexpected error when subscribing for container events, retrying in 5s: {0}".format(e))
                time.sleep(5)
        return ev

    def kill_container(self, container):
        try:
            self.__client.stop(container.params['Id'])
        except (ReadTimeout, ProtocolError) as e:
            logger.error("Communication error when stopping container {0}: {1}".format(container.cid, e))
        except Exception as e:
            logger.error("Unexpected error when stopping container {0}: {1}".format(container.cid, e))
Example #19
def stop_chainlink():
	cli = APIClient()
	for container in cli.containers(filters={"ancestor":"smartcontract/chainlink","status":"running"}):
		cli.kill(container["Id"])
Example #20
def logs_chainlink():
	cli = APIClient()
	for container in cli.containers(filters={"ancestor":"smartcontract/chainlink","status":"running"}):
		logs(container)
Example #21
def logs_ethereum():
	cli = APIClient()
	for container in cli.containers(filters={"name":"eth","status":"running"}):
		logs(container)
Example #22
def remove_all_containers():
    cli = APIClient(base_url='unix://var/run/docker.sock')
    for container in cli.containers(all=True):
        cli.stop(container['Id'], timeout=2)
        cli.remove_container(container['Id'])
Example #23
class DockerClient:
    def __init__(self, url, name):
        self.url = url
        self.name = name
        self.cli = APIClient(base_url=url)
        # derive this container's own ID from /proc/self/cgroup (assumes Bwb
        # itself is running inside a Docker container)
        self.bwb_instance_id = str(
            subprocess.check_output(
                'cat /proc/self/cgroup | grep devices | head -1 | cut -d "/" -f3 | sed "s/.*-//g" | sed "s/\\..*//g"',
                shell=True,
                universal_newlines=True,
            )).splitlines()[0]
        self.bwbMounts = {}
        self.findVolumeMappings()
        self.findShareMountPoint(overwrite=True)
        #self.findShareMountPoint()
        self.logFile = None
        self.schedulerStarted = False

    def getClient(self):
        return self.cli

    def getName(self):
        return self.name

    def getUrl(self):
        return self.url

    def images(self):
        return self.cli.images(all=True)

    def has_image(self, name, version="latest"):
        if not name:
            return False
        repoTag = name + ":" + version
        conId = subprocess.check_output(["docker", "images", "-q", repoTag])
        if conId:
            return True
        return False

    def remove_image(self, id, force=False):
        self.cli.remove_image(id, force=force)

    def pull_image(self, id):
        self.cli.pull(id)

    def containers(self, all=True):
        return self.cli.containers(all=all)

    def findMaxIterateValues(self, settings):
        maxThreads = 0
        maxRam = 0
        if "iteratedAttrs" in settings:
            for attr in settings["iteratedAttrs"]:
                if (attr in settings["data"]
                        and "threads" in settings["data"][attr]
                        and settings["data"][attr]["threads"]):
                    if int(settings["data"][attr]["threads"]) > maxThreads:
                        maxThreads = int(settings["data"][attr]["threads"])
                if (attr in settings["data"]
                        and "ram" in settings["data"][attr]
                        and settings["data"][attr]["ram"]):
                    ramSize = int(settings["data"][attr]["ram"])
                    if ramSize > maxRam:
                        maxRam = ramSize
        return maxThreads, maxRam

    def create_container_external(
        self,
        name,
        volumes=None,
        cmds=None,
        environment=None,
        hostVolumes=None,
        consoleProc=None,
        exportGraphics=False,
        portMappings=None,
        testMode=False,
        logFile=None,
        scheduleSettings=None,
        iterateSettings=None,
        iterate=False,
    ):
        tasksJson = []
        count = 0
        cpuCount = '8'
        memory = '8096'
        for cmd in cmds:
            taskJson = TaskJson(name)
            taskJson.addBaseArgs(cmd)
            for env, var in environment.items():
                taskJson.addEnv(env, var)
            for container_dir, host_dir in hostVolumes.items():
                taskJson.addVolume(host_dir, container_dir, "rw")
            if exportGraphics:
                taskJson.addEnv("DISPLAY", ":1")
                taskJson.addVolume("/tmp/.X11-unix", "/tmp/.X11-unix", "rw")
            maxThreads = 1
            maxRam = 0
            if iterate and iterateSettings:
                maxThreads, maxRam = self.findMaxIterateValues(iterateSettings)
            taskJson.addThreadsRam(maxThreads, maxRam)
            taskJson.addName("cmdName{}".format(count))
            taskJson.addDescription("command{}".format(count))
            tasksJson.append(taskJson)
            count += 1

        namespace = str(uuid.uuid4().hex)[0:19]
        dockerJson = DockerJson(tasksJson, namespace=namespace)
        #jsonFile = "/data/dockerTest.json"
        jsonFile = "/tmp/docker.{}.json".format(namespace)
        with open(jsonFile, "w") as outfile:
            json.dump(dockerJson.jsonObj, outfile)
        parms = [namespace, jsonFile, cpuCount, memory]
        consoleProc.start(parms, schedule=True)

    def prettyEnv(self, var):
        if type(var) is list:
            output = "["
            for v in var:
                #strip single quotes
                if v[0] == "'" and v[-1] == "'":
                    v = v[1:-1]
                output += '\\"{}\\",'.format(v)
            #delete extra comma and replace with ]
            output = output[:-1] + "]"
            #check if output is empty
            if output == "]":
                output = "[]"
            return output
        else:
            try:
                if var[0] == "'" and var[-1] == "'":
                    return var[1:-1]
            except TypeError:
                return var
            except IndexError:
                return var
            return var

    def create_container_iter(
        self,
        volumes=None,
        cmds=None,
        environment=None,
        hostVolumes=None,
        consoleProc=None,
        exportGraphics=False,
        portMappings=None,
        testMode=False,
        logFile=None,
        outputFile=None,
        scheduleSettings=None,
        iterateSettings=None,
        iterate=False,
    ):
        # reset logFile when it is not None - can be "" though - this allows an active reset
        if logFile is not None:
            self.logFile = logFile
        volumeMappings = ""
        for container_dir, host_dir in hostVolumes.items():
            volumeMappings = volumeMappings + "-v {}:{} ".format(
                self.to_best_host_directory(host_dir), container_dir)
        envs = ""
        for env, var in environment.items():
            # strip whitespace (str.strip returns a new string, so rebind it)
            env = env.strip()
            # strip quotes if present
            if env[0] == env[-1] and env.startswith(("'", '"')):
                env = env[1:-1]
            envs = envs + "-e {}={} ".format(env, self.prettyEnv(var))
        # create container cmds
        # the runDockerJob.sh script takes care of the first part of the docker command and cidfile
        # docker  run -i --rm --init --cidfile=<lockfile>'
        dockerBaseFlags = ""
        dockerCmds = []
        if exportGraphics:
            dockerBaseFlags += "-e DISPLAY=:1 -v /tmp/.X11-unix:/tmp/.X11-unix "
        if portMappings:
            for mapping in portMappings:
                dockerBaseFlags += "-p {} ".format(mapping)
        for cmd in cmds:
            dockerCmds.append(dockerBaseFlags +
                              " {} {} {}".format(volumeMappings, envs, cmd))
        consoleProc.state = "running"
        for dcmd in dockerCmds:
            sys.stderr.write("{}\n".format(dcmd))
        # pass on iterateSettings
        if iterate and iterateSettings:
            sys.stderr.write("adding iterate settings\n")
            consoleProc.addIterateSettings(iterateSettings)
            sys.stderr.write("added iterate settings\n")
        else:
            envs += "-e NWORKERS=1 "
        if testMode:
            baseCmd = "docker  run -i --rm --init "
            echoStr = ""
            for dockerCmd in dockerCmds:
                fullCmd = baseCmd + dockerCmd
                echoStr = echoStr + fullCmd + "\n"
            print(echoStr)
            # Do not test for logFile - this may be None if it is not the first widget in testMode
            if self.logFile:
                with open(self.logFile, "a") as f:
                    f.write(echoStr)

            consoleProc.startTest(echoStr)
        else:
            sys.stderr.write("starting runDockerJob.sh\n")
            consoleProc.start(dockerCmds, outputFile=outputFile)

    def findShareMountPoint(self, overwrite=False):
        if overwrite or not os.getenv('BWBSHARE'):
            bwbshare = ""
            bwbhostshare = ""
            if self.bwbMounts:
                for key in self.bwbMounts:
                    if not bwbshare or "share" in self.bwbMounts[key]:
                        bwbhostshare = key + "/.bwbshare"
                        bwbshare = self.bwbMounts[key] + "/.bwbshare"
            if not bwbshare:
                bwbshare = "/tmp/.X11/.bwbshare"
                bwbhostshare = "/tmp/.X11/.bwbshare"
            #remove dir if present and make it
            os.system("rm -rf {}".format(bwbshare))
            os.system("mkdir -p {}".format(bwbhostshare))
            os.environ['BWBSHARE'] = bwbshare
            os.environ['BWBHOSTSHARE'] = bwbhostshare

        #check if mountpoint variable exists
    def findVolumeMappings(self):
        for c in self.cli.containers():
            container_id = c["Id"]
            if container_id == self.bwb_instance_id:
                for m in c["Mounts"]:
                    sys.stderr.write(
                        "Container mount points include {}\n".format(m))
                    if not ("/var/run" in m["Source"]
                            or "/tmp/.X11-unix" in m["Source"]):
                        self.bwbMounts[m["Source"]] = m["Destination"]

    def to_best_host_directory(self, path, returnNone=False):
        sys.stderr.write("bwbMounts are {}\n".format(self.bwbMounts))
        if self.bwbMounts == {}:
            self.findVolumeMappings()
            sys.stderr.write("bwbMounts after findVolume are {}\n".format(
                self.bwbMounts))
            if not self.shareMountPoint:
                self.shareMountPoint["bwb"] = "/tmp/.X11/.bwb"
                self.shareMountPoint["host"] = "/tmp/.X11/.bwb"
        bestPath = None
        for source, dest in self.bwbMounts.items():
            absPath = self.to_host_directory(path, source, dest)
            if absPath is not None:
                if bestPath is None:
                    bestPath = absPath
                elif len(absPath) < len(bestPath):
                    bestPath = absPath
        if bestPath is None:
            if returnNone:
                return None
            return path
        return bestPath

    def to_host_directory(self, path, source, dest):
        cleanDestination = os.path.normpath(dest)
        cleanPath = os.path.normpath(path)
        cleanSource = os.path.normpath(source)
        # check if it is already relative to host path
        if cleanSource in cleanPath:
            return path

        # if the path is not mapping from host, will return path
        if cleanDestination not in cleanPath:
            return None
        abspath = os.path.normpath(
            str.join(
                os.sep,
                (
                    cleanSource,
                    path[path.find(cleanDestination) + len(cleanDestination):],
                ),
            ))
        return abspath
Example #24
0
class Docker:
    def __init__(self, base_url=None):
        self.handle = None
        self.connect_docker_daemon(base_url)

    def connect_docker_daemon(self, base_url=None):
        """
		This method is used to connect to a local/remote Docker host daemon
		:return: Return the Docker operation handle for the host
		"""
        if base_url is None:
            base_url = 'unix:///var/run/docker.sock'
        try:
            self.handle = APIClient(base_url=base_url)
        except errors.APIError as e:
            print(e)
            logging.error(str(e))

    def login_registry(self, login_user, login_pass, registry_srv=None):
        """
		This method is used for log into docker registry server.
		:param login_user:  str: user name for login registry
		:param login_pass:  str: password for login registry
		:param registry_srv: str: uri for registry server address
		:return: result of login status for that registry
		"""
        login_status = self.handle.login(username=login_user,
                                         password=login_pass,
                                         registry=registry_srv)
        return login_status

    def get_docker_info(self):
        """
		Get docker information
		:return: DICT string
		"""
        return self.handle.info()

    def get_image_list(self):
        """
		Get all of the existing images list
		:return: DICT string for all of the images
		"""
        return self.handle.images()

    def public_image_search(self, keyword):
        """
		get a result for searching the image from public/logged in registry
		:return:  DICT string of search result
		"""
        return self.handle.search(keyword)

    # TODO: get docker events implementation
    # def get_docker_events(self, since, until, filters, decode):
    # 	"""
    # 	get running docker service events
    # 	:return: DICT for service envents
    # 	"""
    # 	return self.handle.event()

    def get_disk_utils(self):
        """
		get disk utilization for docker images
		:return: DICT of disk utilization
		"""
        return self.handle.df()

    def pull_image(self, name, tag=None, repo=None):
        """
		pull image from repository by repo/name:tag
		:param repo: String of repository(registry) name
		:param name: String of image name
		:param tag: String of tag name
		:return: DICT response
		"""
        if tag is None:
            tag = "latest"
        try:
            if repo is None:
                return self.handle.pull(name, tag=tag)
            else:
                return self.handle.pull(repo + "/" + name, tag)
        except errors.NotFound as e:
            return {'message': 'Image Not Found', 'status': 'failed'}

    def inspect_image(self, image_id):
        """
		inspect an image
		:param image_id: String of docker image ID
		:return: DICT of inspecting results
		"""
        # TODO: will support image_id and "repo/name:tag" later
        return self.handle.inspect_image(image_id)

    def remove_image(self, image_id, force_remove=False):
        """
		remove the specified image by image id
		:param image_id: String of Docker Image
		:param force_remove: True or False
		:return: DICT of result
		"""
        return self.handle.remove_image(image_id, force=force_remove)

    def tag_image(self, image, repository, force=False, tag=None):
        """
		tag an image to new repository
		:param image: string of image id which to be tagged
		:param repository: string of new repository which image will be tag into
		:param tag: String new tag
		:param force: True or false
		:return: Boolean result of tag
		"""
        return self.handle.tag(image, repository, tag, force=force)

    def push_image(self, repository, tag=None, stream=False, auth_config=None):
        """
		push image to new repository
		:param repository:  String for image to be push. Image ID or Repo/Name:tag
		:param tag: Tag for pushed image, if you don't need to change the tag, keep None.
		:param stream: if True, stream the output as a blocking generator (default False)
		:param auth_config: override the credentials from login()
		:return: Result String or Generator(when use stream=True)
		"""
        if auth_config is None:
            return self.handle.push(repository, tag, stream=stream)
        else:
            return self.handle.push(repository,
                                    tag,
                                    stream=stream,
                                    auth_config=auth_config)

    def save_image(self, image_name, save_path, tarball_name=None):
        """
		save specified image to a tarball
		:param image_name: string of Image ID or "repository/image:tag"
		:param save_path:  string of path
		:param tarball_name: string of tarball name. If not specified it will use the image_name_datetime.tar
		:return: return status
		"""
        if tarball_name is None:
            tarball_name = image_name + "_" + str(
                time.time()).split('.')[0] + ".tar"
        try:
            img = self.handle.get_image(image_name)
            # get_image yields raw bytes, so write in binary mode
            with open(save_path + '/' + tarball_name, 'wb') as f:
                for chunk in img:
                    f.write(chunk)
            return {
                "message":
                "Image {} saved at {}".format(image_name,
                                              save_path + "/" + tarball_name),
                "status":
                "succeed"
            }
        except Exception as e:
            return {"message": e.message, "status": "failed"}

    def load_image(self, tarball_name, repository, tag=None, changes=None):
        """
		load an image from a local path or URL pointing to a tarball
		:param tarball_name:  string of full path to the tarball image
		:param repository: string of the full image name to assign, 'repo/name'
		:param tag: string tag for the imported image. If None, the original image tag is kept
		:return: return
		"""
        if repository is None or str(repository).strip() == "":
            repository = None
        if tag is None or str(tag).strip() == "":
            tag = None
        if changes is None or str(changes).strip() == "":
            changes = None
        return self.handle.import_image(tarball_name,
                                        repository=repository,
                                        tag=tag,
                                        changes=changes)

    def get_containers(self, all=False):
        """
		get list of containers.
		:param all: default False, which lists only running containers; True also includes stopped/exited ones.
		:return: return the dict of containers.
		"""
        # TODO: 'filter' function will be added later.
        return self.handle.containers(all=all)

    def new_container(self, args):
        """
		create container according to the passed in parameters
		:param args: parameters dict
		:return:  return new container id
		"""
        result = self.handle.create_container(
            image=args.get('image'),
            command=args.get('command'),
            hostname=args.get('hostname'),
            user=args.get('user'),
            detach=False if args.get('detach') is None else args.get('detach'),
            stdin_open=False
            if args.get('stdin_open') is None else args.get('stdin_open'),
            tty=False if args.get('tty') is None else args.get('tty'),
            ports=args.get('ports'),
            environment=args.get('environment'),
            volumes=args.get('volumes'),
            network_disabled=False if args.get('network_disabled') is None else
            args.get('network_disabled'),
            name=args.get('name'),
            entrypoint=args.get('entrypoint'),
            working_dir=args.get('working_dir'),
            domainname=args.get('domainname'),
            host_config=args.get('host_config'),
            mac_address=args.get('mac_address'),
            labels=args.get('labels'),
            stop_signal=args.get('stop_signal'),
            networking_config=args.get('networking_config'),
            healthcheck=args.get('healthcheck'),
            stop_timeout=args.get('stop_timeout'),
            runtime=args.get('runtime'))
        return result
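    # A hypothetical args dict for new_container(); omitted keys fall back to
    # the defaults handled above:
    #
    #   args = {
    #       'image': 'alpine:3.19',
    #       'command': 'sleep 30',
    #       'name': 'demo',
    #       'tty': True,
    #   }
    #   result = docker_obj.new_container(args)
    #   docker_obj.start_container(result['Id'])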

    def gen_host_conf(self, args):
        host_config = self.handle.create_host_config(
            # auto remove the container after it exited
            auto_remove=False
            if args.get('auto_remove') is None else args.get('auto_remove'),
            # volume binds  BOOL
            binds=args.get('binds'),
            # BlockIO weight relative device weight in form of : [{"Path":"device_path, "Weight": weight}] DICT
            blkio_weight_device=args.get('blkio_weight_device'),
            # Block IO weight, relative weight. accepts a weight value between 10 and 1000 INT
            blkio_weight=args.get('blkio_weight'),
            # Add kernel capabilities. eg. ['SYS_ADMIN', "MKNOD"]  str or List
            cap_add=args.get('cap_add'),
            # Drop kernel capabilities str or LIST
            cap_drop=args.get('cap_drop'),
            # The length of a CPU period in microseconds  INT
            cpu_period=args.get('cpu_period'),
            # Microseconds of CPU time that the container can get in a CPU period INT
            cpu_quota=args.get('cpu_quota'),
            # CPU shares (relative weight) INT
            cpu_shares=args.get('cpu_shares'),
            # CPUs in which to allow execution (0-3, 0, 1)  str
            cpuset_cpus=args.get('cpuset_cpus'),
            # Memory nodes (MEMs) in which to allow execution (0-3, 0,1). Only effective on NUMA systems
            cpuset_mems=args.get('cpuset_mems'),
            # A list of cgroup rules to apply to the container LIST
            device_cgroup_rules=args.get('device_cgroup_rules'),
            # Limit read rate (bytes per sec) from a device in the form of : [{"Path":"device_path", "Rate":rate}]
            device_read_bps=args.get('device_read_bps'),
            # Limit read rate (IOPS) from a device
            device_read_iops=args.get('device_read_iops'),
            # Limit write rate (byte per sec) from a device.
            device_write_bps=args.get('device_write_bps'),
            # Limit write rate (IOPS) from a device
            device_write_iops=args.get('device_write_iops'),
            # Expose host devices to the container, as a list of string in the form  <path_on_host>:<path_in_container>:<cgroup_permissions>  LIST
            # Eg  /dev/sda:/dev/xvda:rwm allows the container to have read-write access to the host's /dev/sda via a node named /dev/xvda inside the container
            devices=args.get('devices'),
            # Set custom DNS servers LIST
            dns=args.get('dns'),
            # Additional options to be added to the container's resolv.conf file  LIST
            dns_opt=args.get('dns_opt'),
            # DNS search domains  LIST
            dns_search=args.get('dns_search'),
            # Additional hostnames to resolve inside the container, as a mapping of hostname to IP address DICT
            extra_hosts=args.get('extra_hosts'),
            # List of additional group names and/or IDs that the container process will run as LIST
            group_add=args.get('group_add'),
            # Run an init inside the container that forwards signals and reaps process BOOL
            init=False if args.get('init') is None else args.get('init'),
            # Path to the docker-init binary
            init_path=args.get('init_path'),
            # Set the IPC mode for the container STRING
            ipc_mode=args.get('ipc_mode'),
            # Isolation technology to use. Default is None
            isolation=args.get('isolation'),
            # Either a dictionary mapping name to alias or as a list of (name, alias) tuples. DICT or LIST of TUPLES
            links=args.get('links'),
            # logging configuration, as a dictionary with keys:
            #   type: the logging driver name
            #   config: a dictionary of configuration for the logging driver
            log_config=args.get('log_config'),
            # LXC Config DICT
            lxc_conf=args.get('lxc_conf'),
            # memory limit. accepts float values (memory limit of the created container in bytes) or
            # a string with a units identification char (10000b, 10000K, 128m, 1g). If a string is
            # specified without a units character, bytes are assumed  FLOAT or STR
            mem_limit=args.get('mem_limit'),
            # Tune a container's memory swappiness behavior. accepts number between 0 and 100. INT
            mem_swappiness=args.get('mem_swappiness'),
            # Maximum amount of memory + swap a container is allowed to consume. STR or INT
            memswap_limit=args.get('memswap_limit'),
            # Specification for mounts to be added to the container. More powerful alternative to binds.
            # Each item in the list is expected to be a docker.types.Mount object.  LIST
            mounts=args.get('mounts'),
            # Network mode:  STR
            #   bridge: Create a new network stack for the container on the bridge network
            #   none:   No network for this container
            #   container:<name|id> Reuse another container's netowrk stack.
            #   host:   Use the host network stack.
            network_mode=args.get('network_mode'),
            # whether to disable OOM killer BOOL
            oom_kill_disable=False if args.get('oom_kill_disable') is None else
            args.get('oom_kill_disable'),
            # An integer value containing the score given to the container in order to turn OOM killer preference INT
            oom_score_adj=args.get('oom_score_adj'),
            # If set to 'host', use the host PID namespace inside the container. STR
            pid_mode=args.get('pid_mode'),
            # Tune a container's pids limit. Set -1 for unlimited. INT
            pids_limit=-1
            if args.get('pids_limit') is None else args.get('pids_limit'),
            # port bindings between host and container
            port_bindings=args.get('port_bindings'),
            # give extended privileges to this container  BOOL
            privileged=False
            if args.get('privileged') is None else args.get('privileged'),
            # publish all ports to the hosts BOOL
            publish_all_ports=False if args.get('publish_all_ports') is None
            else args.get('publish_all_ports'),
            # mount the container's root filesystem as read only  BOOL
            read_only=False
            if args.get('read_only') is None else args.get('read_only'),
            # restart policy DICT
            #   Name one of 'on-failure' or 'always'
            #   MaximumRetryCount: Number of time to restart to container on failure
            restart_policy=args.get('restart_policy'),
            # A list of string values to customize labels for MLS system such as SELinux LIST
            security_opt=args.get('security_opt'),
            # Size of /dev/shm (eg.1G)  str or int
            shm_size=args.get('shm_size'),
            # Storage driver options per container as a key-value mapping  DICT
            storage_opt=args.get('storage_opt'),
            # kernel parameters to set in the container  DICT
            sysctls=args.get('sysctls'),
            # Temporary filesystems to mount, as a dictionary mapping a path inside the container to options for that path
            # eg. {'/mnt/vol1': '', '/mnt/vol2': 'size=3G, uid=1000'}
            tmpfs=args.get('tmpfs'),
            # ulimits to set inside the container as a list of dicts
            ulimits=args.get('ulimits'),
            # sets the user namespace mode for the container when the user namespace remapping option is enabled.
            # Supported values are: host STRING
            userns_mode=args.get('userns_mode'),
            # List of container names or IDS to get volumes from  LIST
            volumes_from=args.get('volumes_from'),
            # runtime to use with this container
            runtime=args.get('runtime'))
        return host_config

    def gen_net_conf(self, args):
        """
		Generate networking config for creating a container
		:param args:  parameters for creating the network
		:return: dictionary of a networking configuration file
		"""
        # Ref: http://docker-py.readthedocs.io/en/stable/api.html#docker.api.container.ContainerApiMixin.create_networking_config
        network_dict = self.handle.create_networking_config(
            {args['network_name']: self.gen_ep_conf(args)})
        return network_dict

    def gen_ep_conf(self, args):
        """
		This function is used to create an endpoint parameters dictionary for create_networking_config
		:param args: Pass-in Parameters for Endpoint information
		:return:  Endpoint dictionary
		"""
        # Ref: http://docker-py.readthedocs.io/en/stable/api.html#docker.api.container.ContainerApiMixin.create_endpoint_config
        endpoint_dict = self.handle.create_endpoint_config(
            aliases=args['aliases'],
            links=args['links'],
            ipv4_address=args['ipv4_address'],
            ipv6_address=args['ipv6_address'],
            link_local_ips=args['link_local_ips'])
        return endpoint_dict
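    # A hypothetical parameter dict for gen_net_conf(); the keys mirror the
    # lookups in gen_ep_conf, and unused entries can be left as None:
    #
    #   net_args = {
    #       'network_name': 'isolated_nw',
    #       'aliases': ['web'],
    #       'links': None,
    #       'ipv4_address': '172.28.0.10',
    #       'ipv6_address': None,
    #       'link_local_ips': None,
    #   }
    #   networking_config = docker_obj.gen_net_conf(net_args)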

    def start_container(self, container_id):
        """
		This func is for start a created container by ID
		:param container_id: string of container ID or Name Tag
		:return: dict of status
		"""
        return self.handle.start(container_id)

    def stop_container(self, container_id):
        """
		This method is for stopping a running container by ID or Name
		:param container_id: String of container ID or name
		:return:  Dict of return status
		"""
        return self.handle.stop(container_id)

    def restart_container(self, container_id):
        """
		This function is for restart a container by container id or name
		:param container_id: string of container id or name
		:return: dict of status
		"""
        return self.handle.restart(container_id)

    def remove_container(self, container_id):
        """
		This function is used for remove a stopped container by ID or Name
		:param container_id: String of container ID or Name
		:return: DICT of status
		"""
        return self.handle.remove_container(container_id)

    def list_mapping_ports(self, container_id):
        """
		This func will show all of the mapping of host-> container ports.
		:param container_id:  String of Container Name or ID
		:return: dict of ports mapping table
		"""
        return self.handle.port(container_id)

    def commit_to_image(self, args):
        """
		This function is used for committing a changed container to an image
		:param args[container_id]: container id or name
		:return: dict of status
		"""
        return self.handle.commit(container=args.get('container_id'),
                                  repository=args.get('repo_name'),
                                  tag=args.get('tag_name'),
                                  message=args.get('message'),
                                  author=args.get('author'),
                                  changes=args.get('changes'),
                                  conf=args.get('conf'))

    def pull_container_log(self, args):
        """
		Pull logs of a running container
		:param args: args[container_id]: container id or name
		:return: return list of log lines
		"""
        # logs() returns bytes by default, so decode before splitting
        return self.handle.logs(args['container_id']).decode().split('\n')

    def attach_container(self, container_id):
        # This 'attach' function also allow multiple parameters, this version only implement one
        # https://docker-py.readthedocs.io/en/stable/containers.html?highlight=exec#docker.models.containers.Container.attach
        return self.handle.attach(container_id)

    def exec_container(self, args):
        # there will be more parameters for choose, deployment later
        # in this version, only pass the 'cmd' parameter into method, other parameters keeps default value.
        # https://docker-py.readthedocs.io/en/stable/containers.html?highlight=exec#docker.models.containers.Container.exec_run

        return self.handle.exec_run(args['cmd'])

    def container_top(self, args):
        return self.handle.top(args['container_id'])

    def container_res_usage(self, args):
        # Method 'stats' returns a generator. Need to use next(gen) to get data
        return self.handle.stats(args['container_id'])

    def container_info(self, args):
        return self.handle.inspect_container(args['container_id'])
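A hedged end-to-end sketch of the wrapper above, against a local daemon:

# Hypothetical sequence: pull, create, start, read logs.
d = Docker()                              # defaults to unix:///var/run/docker.sock
d.pull_image('alpine', tag='3.19')
result = d.new_container({'image': 'alpine:3.19', 'command': 'echo hello'})
d.start_container(result['Id'])           # create_container returns {'Id': ...}
print(d.pull_container_log({'container_id': result['Id']}))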
Example #25
def list_containers():
    cli = APIClient(base_url=base_tcp_url)
    containers = cli.containers(all=True)
    return jsonify(containers)
Example #26
class DockerController(object):
    def _load_config(self):
        config = os.environ.get('BROWSER_CONFIG', './config.yaml')
        with open(config) as fh:
            config = yaml.safe_load(fh)

        config = config['browser_config']
        for n, v in config.items():
            new_v = os.environ.get(n)
            if not new_v:
                new_v = os.environ.get(n.upper())

            if new_v:
                print('Setting Env Val: {0}={1}'.format(n, new_v))
                config[n] = new_v

        return config

    def __init__(self):
        config = self._load_config()

        self.name = config['cluster_name']
        self.label_name = config['label_name']

        self.init_req_expire_secs = config['init_req_expire_secs']
        self.queue_expire_secs = config['queue_expire_secs']

        self.remove_expired_secs = config['remove_expired_secs']

        self.api_version = config['api_version']

        self.ports = config['ports']
        self.port_bindings = dict((port, None) for port in self.ports.values())

        self.max_containers = config['max_containers']

        self.throttle_expire_secs = config['throttle_expire_secs']

        self.browser_image_prefix = config['browser_image_prefix']

        self.label_browser = config['label_browser']
        self.label_prefix = config['label_prefix']

        self.network_name = config['network_name']
        self.volume_source = config['browser_volumes']
        self.shm_size = config['shm_size']

        self.default_browser = config['default_browser']

        self._init_cli()

        while True:
            try:
                self._init_redis(config)
                break
            except BusyLoadingError:
                print('Waiting for Redis to Load...')
                time.sleep(5)

    def _init_cli(self):
        if os.path.exists('/var/run/docker.sock'):
            self.cli = APIClient(base_url='unix://var/run/docker.sock',
                                 version=self.api_version)
        else:
            kwargs = kwargs_from_env(assert_hostname=False)
            kwargs['version'] = self.api_version
            self.cli = APIClient(**kwargs)

    def _init_redis(self, config):
        redis_url = os.environ['REDIS_BROWSER_URL']

        self.redis = redis.StrictRedis.from_url(redis_url,
                                                decode_responses=True)

        self.redis.setnx('next_client', '1')
        self.redis.setnx('max_containers', self.max_containers)
        self.redis.setnx('num_containers', '0')

        # TODO: support this
        #self.redis.set('cpu_auto_adjust', config['cpu_auto_adjust'])

        # if num_containers is invalid, reset to 0
        try:
            assert int(self.redis.get('num_containers')) >= 0
        except:
            self.redis.set('num_containers', 0)

        self.redis.set('throttle_samples', config['throttle_samples'])

        self.redis.set('throttle_max_avg', config['throttle_max_avg'])

        self.duration = int(config['container_expire_secs'])
        self.redis.set('container_expire_secs', self.duration)

    def load_avail_browsers(self, params=None):
        filters = {"dangling": False}

        if params:
            all_filters = []
            for k, v in params.items():
                if k not in ('short',):
                    all_filters.append(self.label_prefix + k + '=' + v)
            filters["label"] = all_filters
        else:
            filters["label"] = self.label_browser

        browsers = {}
        try:
            images = self.cli.images(filters=filters)

            for image in images:
                tags = image.get('RepoTags')
                id_ = self._get_primary_id(tags)
                if not id_:
                    continue

                props = self._browser_info(image['Labels'])
                props['id'] = id_

                browsers[id_] = props

        except:
            traceback.print_exc()

        return browsers

    def _get_primary_id(self, tags):
        if not tags:
            return None

        primary_tag = None
        for tag in tags:
            if not tag:
                continue

            if tag.endswith(':latest'):
                tag = tag.replace(':latest', '')

            if not tag.startswith(self.browser_image_prefix):
                continue

            # pick the longest tag as primary tag
            if not primary_tag or len(tag) > len(primary_tag):
                primary_tag = tag

        if primary_tag:
            return primary_tag[len(self.browser_image_prefix):]
        else:
            return None

    def load_browser(self, name, include_icon=False):
        tag = self.browser_image_prefix + name

        try:
            image = self.cli.inspect_image(tag)
            tags = image.get('RepoTags')
            props = self._browser_info(image['Config']['Labels'],
                                       include_icon=include_icon)
            props['id'] = self._get_primary_id(tags)
            props['tags'] = tags
            return props

        except Exception:
            traceback.print_exc()
            return {}

    def _browser_info(self, labels, include_icon=False):
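        """Turn prefixed image labels into a properties dict, collecting any
        caps.* labels into a comma-separated 'caps' string."""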
        props = {}
        caps = []
        for n, v in labels.items():
            wr_prop = n.split(self.label_prefix)
            if len(wr_prop) != 2:
                continue

            name = wr_prop[1]

            if not include_icon and name == 'icon':
                continue

            props[name] = v

            if name.startswith('caps.'):
                caps.append(name.split('.', 1)[1])

        props['caps'] = ', '.join(caps)

        return props

    def _get_host_port(self, info, port, default_host):
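        """Return 'host:port' for a published container port, substituting
        default_host when Docker reports the 0.0.0.0 wildcard address."""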
        info = info['NetworkSettings']['Ports'][str(port) + '/tcp']
        info = info[0]
        host = info['HostIp']
        if host == '0.0.0.0' and default_host:
            host = default_host

        return host + ':' + info['HostPort']

    def _get_port(self, info, port):
        info = info['NetworkSettings']['Ports'][str(port) + '/tcp']
        info = info[0]
        return info['HostPort']

    def sid(self, id):
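        """Return the 12-character short container id, as shown by docker ps."""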
        return id[:12]

    def timed_new_container(self, browser, env, host, reqid):
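        """Create a container and record how long startup took; the rolling
        init_timings list feeds the throttle() check."""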
        start = time.time()
        info = self.new_container(browser, env, host)
        end = time.time()
        dur = end - start

        time_key = 't:' + reqid
        self.redis.setex(time_key, self.throttle_expire_secs, dur)

        throttle_samples = int(self.redis.get('throttle_samples'))
        print('INIT DUR: ' + str(dur))
        self.redis.lpush('init_timings', time_key)
        self.redis.ltrim('init_timings', 0, throttle_samples - 1)

        return info

    def new_container(self, browser_id, env=None, default_host=None):
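        """Create and start a browser container, register its IP in Redis and
        return the published host:port mappings (empty dict on failure)."""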
        browser = self.load_browser(browser_id)

        # fall back to the default browser if the requested one is missing
        if not browser:
            browser = self.load_browser(self.default_browser)

        if env is None:
            env = {}

        if browser.get('req_width'):
            env['SCREEN_WIDTH'] = browser.get('req_width')

        if browser.get('req_height'):
            env['SCREEN_HEIGHT'] = browser.get('req_height')

        image = browser['tags'][0]
        print('Launching ' + image)

        short_id = None

        try:
            host_config = self.create_host_config()

            container = self.cli.create_container(
                image=image,
                ports=list(self.ports.values()),
                environment=env,
                runtime="nvidia",
                host_config=host_config,
                labels={self.label_name: self.name},
            )
            id_ = container.get('Id')
            short_id = self.sid(id_)

            self.cli.start(container=id_)

            info = self.cli.inspect_container(id_)
            ip = info['NetworkSettings']['IPAddress']
            if not ip:
                ip = info['NetworkSettings']['Networks'][
                    self.network_name]['IPAddress']

            self.redis.hset('all_containers', short_id, ip)

            result = {}

            for port_name in self.ports:
                result[port_name + '_host'] = self._get_host_port(
                    info, self.ports[port_name], default_host)

            result['id'] = short_id
            result['ip'] = ip
            result['audio'] = os.environ.get('AUDIO_TYPE', '')
            return result

        except Exception as e:
            traceback.print_exc()
            if short_id:
                print('EXCEPTION: ' + short_id)
                self.remove_container(short_id)

            return {}

    def create_host_config(self):
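        """Build the shared host config: X11 socket bind mount, port bindings,
        shm size, and the privileged/nvidia-runtime settings."""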
        if self.volume_source:
            volumes_from = [self.volume_source]
        else:
            volumes_from = None

        host_config = self.cli.create_host_config(
            binds={
                '/tmp/.X11-unix/X0': {
                    'bind': '/tmp/.X11-unix/X0',
                    'ro': False
                },
            },
            port_bindings=self.port_bindings,
            volumes_from=volumes_from,
            network_mode=self.network_name,
            shm_size=self.shm_size,
            cap_add=['ALL'],
            security_opt=['apparmor=unconfined'],
            privileged=True,
            runtime="nvidia",
        )
        return host_config

    def remove_container(self, short_id):
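        """Force-remove the container and clean up its Redis bookkeeping
        (the ct:, ip: and req: keys plus the all_containers hash)."""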
        print('REMOVING: ' + short_id)
        try:
            self.cli.remove_container(short_id, force=True)
        except Exception as e:
            print(e)

        reqid = None
        ip = self.redis.hget('all_containers', short_id)
        if ip:
            reqid = self.redis.hget('ip:' + ip, 'reqid')

        with redis.utils.pipeline(self.redis) as pi:
            pi.delete('ct:' + short_id)

            if not ip:
                return

            pi.hdel('all_containers', short_id)
            pi.delete('ip:' + ip)
            if reqid:
                pi.delete('req:' + reqid)

    def event_loop(self):
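        """Stream Docker events forever, dispatching each to the handler."""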
        for event in self.cli.events(decode=True):
            try:
                self.handle_docker_event(event)
            except Exception as e:
                print(e)

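    # Usage sketch (hypothetical ``mgr`` instance of this class): event_loop()
    # blocks on the Docker event stream, so it would typically run on daemon
    # threads alongside the expiry sweep, e.g.:
    #
    #     t = threading.Thread(target=mgr.event_loop)
    #     t.daemon = True
    #     t.start()
    #     t = threading.Thread(target=mgr.remove_expired_loop)
    #     t.daemon = True
    #     t.start()
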
    def handle_docker_event(self, event):
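        """Track 'start' and 'die' events for this manager's browser
        containers, adjusting num_containers and the per-container TTL key."""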
        if event['Type'] != 'container':
            return

        if (event['status'] == 'die'
                and event['from'].startswith(self.browser_image_prefix)
                and event['Actor']['Attributes'].get(
                    self.label_name) == self.name):

            short_id = self.sid(event['id'])
            print('EXITED: ' + short_id)

            self.remove_container(short_id)
            self.redis.decr('num_containers')
            return

        if (event['status'] == 'start'
                and event['from'].startswith(self.browser_image_prefix)
                and event['Actor']['Attributes'].get(
                    self.label_name) == self.name):

            short_id = self.sid(event['id'])
            print('STARTED: ' + short_id)

            self.redis.incr('num_containers')
            self.redis.setex('ct:' + short_id, self.duration, 1)
            return

    def remove_expired_loop(self):
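        """Periodically sweep expired and stale containers."""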
        while True:
            try:
                self.remove_expired()
            except Exception as e:
                print(e)

            time.sleep(self.remove_expired_secs)

    def remove_expired(self):
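        """Remove containers whose ct: key expired in Redis, plus any ids
        Redis still tracks that Docker no longer reports."""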
        all_known_ids = self.redis.hkeys('all_containers')

        all_containers = {
            self.sid(c['Id'])
            for c in self.cli.containers(quiet=True)
        }

        for short_id in all_known_ids:
            if not self.redis.get('ct:' + short_id):
                print('TIME EXPIRED: ' + short_id)
                self.remove_container(short_id)
            elif short_id not in all_containers:
                print('STALE ID: ' + short_id)
                self.remove_container(short_id)

    def auto_adjust_max(self):
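        """Scale max_containers to cpu_auto_adjust * NCPU when enabled."""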
        print('Auto-Adjust Max Loop')
        try:
            scale = self.redis.get('cpu_auto_adjust')
            if not scale:
                return

            info = self.cli.info()
            cpus = int(info.get('NCPU', 0))
            if cpus <= 1:
                return

            total = int(float(scale) * cpus)
            self.redis.set('max_containers', total)

        except Exception as e:
            traceback.print_exc()

    def add_new_client(self, reqid):
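        """Assign the next client number to this reqid and start its
        queue-expiry keys."""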
        client_id = self.redis.incr('clients')
        self.redis.setex('cm:' + reqid, self.queue_expire_secs, client_id)
        self.redis.setex('q:' + str(client_id), self.queue_expire_secs, 1)
        return client_id

    def _make_reqid(self):
        return base64.b32encode(os.urandom(15)).decode('utf-8')

    def _make_vnc_pass(self):
        return base64.b64encode(os.urandom(21)).decode('utf-8')

    def register_request(self, container_data):
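        """Store the pending container request under a fresh reqid."""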
        reqid = self._make_reqid()

        container_data['reqid'] = reqid

        self.redis.hmset('req:' + reqid, container_data)
        self.redis.expire('req:' + reqid, self.init_req_expire_secs)
        return reqid

    def am_i_next(self, reqid):
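        """Ticket-queue admission check: return -1 when this request may start
        a container now, otherwise its distance from the front of the queue."""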
        client_id = self.redis.get('cm:' + reqid)

        if not client_id:
            client_id = self.add_new_client(reqid)
        else:
            self.redis.expire('cm:' + reqid, self.queue_expire_secs)

        client_id = int(client_id)
        next_client = int(self.redis.get('next_client'))

        # not next client
        if client_id != next_client:
            # if this client expired, delete it from queue
            if not self.redis.get('q:' + str(next_client)):
                print('skipping expired', next_client)
                self.redis.incr('next_client')

            # missed your number somehow, get a new one!
            if client_id < next_client:
                client_id = self.add_new_client(reqid)

        diff = client_id - next_client

        if self.throttle():
            self.redis.expire('q:' + str(client_id), self.queue_expire_secs)
            return diff

        num_containers = int(self.redis.get('num_containers'))

        max_containers = self.redis.get('max_containers')
        max_containers = int(
            max_containers) if max_containers else self.max_containers

        if diff <= (max_containers - num_containers):
            self.redis.incr('next_client')
            return -1

        else:
            self.redis.expire('q:' + str(client_id), self.queue_expire_secs)
            return diff

    def throttle(self):
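        """Return True when the average of the recent container-start timings
        reaches throttle_max_avg, i.e. startups are currently too slow."""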
        timings = self.redis.lrange('init_timings', 0, -1)
        if not timings:
            return False

        timings = self.redis.mget(*timings)

        avg = 0
        count = 0
        for val in timings:
            if val is not None:
                avg += float(val)
                count += 1

        if count == 0:
            return False

        avg = avg / count

        print('AVG: ', avg)
        throttle_max_avg = float(self.redis.get('throttle_max_avg'))
        if avg >= throttle_max_avg:
            print('Throttling, too slow...')
            return True

        return False

    def _copy_env(self, env, name, override=None):
        env[name] = override or os.environ.get(name)

    def init_new_browser(self, reqid, host, width=None, height=None):
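        """Resolve a registered request into a running browser container, or
        report this client's queue position while it has to wait."""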
        req_key = 'req:' + reqid

        container_data = self.redis.hgetall(req_key)

        if not container_data:
            return None

        # already started, attempt to reconnect
        if 'queue' in container_data:
            container_data['ttl'] = self.redis.ttl('ct:' +
                                                   container_data['id'])
            return container_data

        queue_pos = self.am_i_next(reqid)

        if queue_pos >= 0:
            return {'queue': queue_pos}

        browser = container_data['browser']
        url = container_data.get('url', 'about:blank')
        ts = container_data.get('request_ts')

        env = {}

        env['URL'] = url
        env['TS'] = ts
        env['BROWSER'] = browser

        vnc_pass = self._make_vnc_pass()
        env['VNC_PASS'] = vnc_pass

        self._copy_env(env, 'PROXY_HOST')
        self._copy_env(env, 'PROXY_PORT')
        self._copy_env(env, 'PROXY_GET_CA')
        self._copy_env(env, 'SCREEN_WIDTH', width)
        self._copy_env(env, 'SCREEN_HEIGHT', height)
        self._copy_env(env, 'IDLE_TIMEOUT')
        self._copy_env(env, 'AUDIO_TYPE')

        info = self.timed_new_container(browser, env, host, reqid)
        info['queue'] = 0
        info['vnc_pass'] = vnc_pass

        new_key = 'ip:' + info['ip']

        # TODO: support different durations?
        self.duration = int(self.redis.get('container_expire_secs'))

        with redis.utils.pipeline(self.redis) as pi:
            pi.rename(req_key, new_key)
            pi.persist(new_key)

            pi.hmset(req_key, info)
            pi.expire(req_key, self.duration)

        info['ttl'] = self.duration
        return info

    def get_random_browser(self):
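        """Pick a random available browser id, skipping any marked skip_random."""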
        browsers = self.load_avail_browsers()
        while True:
            id_ = random.choice(list(browsers.keys()))
            if browsers[id_].get('skip_random'):
                continue

            return id_
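
    # Usage sketch (hypothetical flow): a request handler would first call
    # register_request() with the browser id and url, then poll
    # init_new_browser(reqid, host) until the returned dict reports
    # 'queue' == 0, at which point it also carries the vnc_pass and the
    # published host ports.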