def update(self):
    """ Updates a running instance of the component.

    Brings up a fresh container, drains the old ones, points the proxy at the
    new container, and finally asks the manager to terminate the previous
    primary. Returns True on success and False otherwise.
    """
    self.logger.debug('Updating component %s', self.getName())
    docker_client = getDockerClient()

    # Snapshot the current state before starting the replacement.
    previous_containers = self.getAllContainers(docker_client)
    previous_primary = self.getPrimaryContainer()

    # Bring up the replacement instance; bail out if it never became healthy.
    new_container = self.start()
    if not new_container:
        return False

    # Everything that was running is now on its way out.
    for old_container in previous_containers:
        setContainerStatus(old_container, 'draining')

    # Update the port proxy to redirect the external ports to the new
    # container.
    report('Redirecting traffic to new container', component=self)
    self.manager.adjustForUpdatingComponent(self, new_container)

    # Ask the manager to wind down the old primary, if there was one.
    if previous_primary is not None:
        self.manager.terminateContainer(previous_primary, self)

    return True
def update(self):
    """ Updates a running instance of the component.

    Starts a replacement container, marks the existing containers as
    draining, deregisters from the ELB, and terminates the previous primary
    container. Returns True on success and False otherwise.
    """
    self.logger.debug("Updating component %s", self.getName())
    docker_client = getDockerClient()

    # Capture the pre-update container set and primary.
    previous_containers = self.getAllContainers(docker_client)
    previous_primary = self.getPrimaryContainer()

    # Launch the replacement; a falsy result means the start failed.
    new_container = self.start()
    if not new_container:
        return False

    # Flag every pre-existing container as draining.
    for old_container in previous_containers:
        setContainerStatus(old_container, "draining")

    # Signal the existing primary container to terminate.
    # NOTE(review): deregisterContainer() is invoked with no arguments here,
    # unlike sibling revisions — confirm against the ELB manager's API.
    self.elb_manager.deregisterContainer()
    if previous_primary is not None:
        self.manager.terminateContainer(previous_primary, self)

    return True
def updateProxy(self):
    """ Updates the proxy used for port mapping to conform to the current
    running container list.

    Non-draining containers get fresh routes; draining containers are handed
    to the watcher thread, which shuts them down once connectionless. Outside
    daemon mode, blocks until the watcher thread finishes draining so daemon
    threads don't get killed by interpreter shutdown.
    """
    client = getDockerClient()

    # Clear all routes in the proxy.
    # TODO: When this is in daemon mode, don't need do this. We could selectively
    # edit it instead.
    self.proxy.clear_routes()

    # Add routes for the non-draining containers and collect the draining
    # containers to watch.
    report('Finding running containers...', level=ReportLevels.EXTRA)
    draining_containers = []
    starting_containers = []

    for component in self.components.values():
        for container in component.getAllContainers(client):
            if getContainerStatus(container) != 'draining':
                starting_containers.append(container)
                for mapping in component.config.ports:
                    # Each external port is routed to the locally-mapped port
                    # of the container.
                    local_port = containerutil.getLocalPort(client, container, mapping.container)
                    route = Route(mapping.kind == 'http', mapping.external, 'localhost', local_port)
                    self.proxy.add_route(route)
            else:
                draining_containers.append(container)

    # Commit the changes to the proxy; with nothing left to route, shut the
    # proxy down entirely.
    if draining_containers or starting_containers:
        report('Updating proxy...', level=ReportLevels.EXTRA)
        self.proxy.commit()
    else:
        report('Shutting down proxy...', level=ReportLevels.EXTRA)
        self.proxy.shutdown()

    # Mark the starting containers as running.
    for container in starting_containers:
        setContainerStatus(container, 'running')

    if draining_containers:
        report('Starting monitoring...', level=ReportLevels.EXTRA)

    # If there are any draining containers, add them to the watcher thread.
    with self.watcher_lock:
        self.containers_watched.extend(draining_containers)

    # Call the event to wakeup the watcher thread.
    if draining_containers:
        self.watcher_event.set()

    # If in local mode, then wait until all the containers have drained. This
    # prevents the python debugger from shutting down, since the other threads
    # are all daemon threads.
    if not self.daemon_mode and draining_containers:
        while True:
            self.watcher_thread.join(10)
            # FIX: Thread.isAlive() was removed in Python 3.9; is_alive() is
            # the supported spelling (available since Python 2.6).
            if not self.watcher_thread.is_alive():
                break
def start(self):
    """ Starts a new instance of the component. Note that this does *not*
    update the proxy.

    Returns the container on success, or None if the ready checks did not pass
    within the configured timeout (the container is stopped in that case).
    """
    client = getDockerClient()
    self.logger.debug('Starting container for component %s', self.getName())

    # Ensure that we have the image. If not, we try to download it.
    self.ensureImage(client)

    # Start the instance with the proper image ID.
    container = self.createContainer(client)
    report('Starting container ' + container['Id'][:12], component=self)

    if self.config.privileged:
        report('Container will be run in privileged mode', component=self)

    client.start(container, binds=self.config.getBindings(container['Id']),
                 volumes_from=self.config.volumes_from,
                 privileged=self.config.privileged)

    # Health check until the instance is ready.
    report('Waiting for health checks...', component=self)

    # Start a health check thread to determine when the component is ready.
    timeout = self.config.getReadyCheckTimeout()
    readycheck_thread = Thread(target=self.readyCheck, args=[container, timeout])
    readycheck_thread.daemon = True
    readycheck_thread.start()

    # Wait for the health thread to finish.
    # FIX: reuse the timeout computed above instead of calling
    # getReadyCheckTimeout() a second time.
    readycheck_thread.join(timeout)

    # If the thread is still alive, then our join timed out.
    # FIX: Thread.isAlive() was removed in Python 3.9; is_alive() is the
    # supported spelling.
    if readycheck_thread.is_alive():
        report('Timed out waiting for health checks. Stopping container...', component=self)
        client.stop(container)
        report('Container stopped', component=self)
        return None

    # Otherwise, the container is ready. Set it as starting.
    setContainerComponent(container, self.getName())
    setContainerStatus(container, 'starting')
    return container
def updateProxy(self):
    """ Rebuilds the proxy's route table so that it matches the set of
    containers currently running across all components. Draining containers
    get no routes; freshly started containers are promoted to 'running'. """
    docker_client = getDockerClient()

    # Wipe the current routing table; it is rebuilt from scratch below.
    # TODO: When this is in daemon mode, don't need do this. We could selectively
    # edit it instead.
    self.proxy.clear_routes()

    report('Finding running containers...', level=ReportLevels.EXTRA)
    drained = []
    activating = []

    for component in self.components.values():
        for candidate in component.getAllContainers(docker_client):
            # Draining containers get no routes at all.
            if getContainerStatus(candidate) == 'draining':
                drained.append(candidate)
                continue

            address = containerutil.getContainerIPAddress(docker_client, candidate)
            activating.append(candidate)

            # Routes for the component's normal exposed ports.
            for mapping in component.config.ports:
                self.proxy.add_route(
                    Route(mapping.kind == 'http', mapping.external, address, mapping.container))

            # Routes for the container-link ports.
            for link in component.config.defined_component_links:
                self.proxy.add_route(
                    Route(link.kind == 'http', link.getHostPort(), address, link.port))

    # Apply the new table — or shut the proxy down when nothing is left.
    if drained or activating:
        report('Updating proxy...', level=ReportLevels.EXTRA)
        self.proxy.commit()
    else:
        report('Shutting down proxy...', level=ReportLevels.EXTRA)
        self.proxy.shutdown()

    # Promote the freshly started containers.
    for candidate in activating:
        setContainerStatus(candidate, 'running')
def watchTermination(self, container, component):
    """ Drives the shutdown sequence for a draining container: sends every
    configured termination signal, blocks until all termination checks pass,
    then stops the container and removes its metadata. """
    report('Monitor check started', level=ReportLevels.BACKGROUND)
    client = getDockerClient()

    # Build and fire every configured termination signal.
    signals = [(conf, buildTerminationSignal(conf))
               for conf in component.config.termination_signals]

    report('Sending %s termination signals' % len(signals), component=component)
    for (conf, signal) in signals:
        report('Sending termination signal: ' + conf.getTitle(), component=component)
        if not signal.run(container, report):
            report('Termination signal failed', component=component)

    # Block until every termination check passes, retrying a failed check
    # after its configured timeout.
    checks = [(conf, buildHealthCheck(conf))
              for conf in component.config.termination_checks]

    report('Waiting for %s termination checks' % len(checks), component=component)
    for (conf, check) in checks:
        while True:
            report('Running termination check: ' + conf.getTitle(), component=component)
            if check.run(container, report):
                break

            report('Termination check failed', component=component)
            report('Sleeping ' + str(conf.timeout) + ' second(s)...', component=component)
            time.sleep(conf.timeout)

    report('Monitor check finished', level=ReportLevels.BACKGROUND)

    # All checks passed: stop the container and drop its metadata.
    setContainerStatus(container, 'shutting-down')
    report('Shutting down container: ' + container['Id'][0:12], level=ReportLevels.BACKGROUND)
    client.stop(container)
    removeContainerMetadata(container)
def checkProxy(self):
    """ Checks to see if a draining container can be shutdown.

    Runs on the watcher thread. Blocks on watcher_event until draining
    containers have been queued into containers_watched, then repeatedly
    stops any watched container that has no remaining live connections,
    sleeping 10 seconds between sweeps. In daemon mode the outer loop runs
    forever; otherwise the method returns once the batch has fully drained.
    """
    counter = 0
    client = getDockerClient()
    while True:
        # Wait until something of interest is available to check.
        self.watcher_event.wait()
        self.watcher_event.clear()

        while True:
            # Load the containers to check (under the lock).
            containers = None
            with self.watcher_lock:
                containers = list(self.containers_watched)

            # If none, we're done.
            if not containers:
                break

            # Find the containers that no longer need to be running. Any container with no
            # valid connections coming in and a status of 'draining', can be shutdown.
            report('Monitor check started', level = ReportLevels.BACKGROUND)
            containers_to_shutdown = self.findConnectionLessContainers(containers)
            if len(containers_to_shutdown) > 0:
                # Remove the finished containers from the watch list first,
                # under the lock, so updateProxy can safely extend the list.
                with self.watcher_lock:
                    for container in containers_to_shutdown:
                        self.containers_watched.remove(container)

                # Stop each drained container and discard its metadata.
                for container in containers_to_shutdown:
                    setContainerStatus(container, 'shutting-down')
                    report('Shutting down container: ' + container['Id'][0:12], level = ReportLevels.BACKGROUND)
                    client.stop(container)
                    removeContainerMetadata(container)

            # Determine how many residual containers are left over.
            difference = len(containers) - len(containers_to_shutdown)
            if difference > 0:
                report(str(difference) + ' additional containers to monitor. Sleeping for 10 seconds', level = ReportLevels.BACKGROUND)
                time.sleep(10)
                counter = counter + 1

            report('Monitor check finished', level = ReportLevels.BACKGROUND)

        if not self.daemon_mode:
            # Quit now that we're done.
            return
def start(self):
    """ Starts a new instance of the component. Note that this does *not*
    update the proxy.

    Returns the container on success, or None if the ready checks did not pass
    within the configured timeout (the container is stopped in that case).
    """
    client = getDockerClient()
    self.logger.debug("Starting container for component %s", self.getName())

    # Ensure that we have the image. If not, we try to download it.
    self.ensureImage(client)

    # Start the instance with the proper image ID.
    container = self.createContainer(client)
    report("Starting container " + container["Id"][:12], component=self)

    if self.config.privileged:
        report("Container will be run in privileged mode", component=self)

    client.start(
        container,
        binds=self.config.getBindings(container["Id"]),
        volumes_from=self.config.volumes_from,
        privileged=self.config.privileged,
    )

    # Health check until the instance is ready.
    report("Waiting for health checks...", component=self)

    # Start a health check thread to determine when the component is ready.
    timeout = self.config.getReadyCheckTimeout()
    readycheck_thread = Thread(target=self.readyCheck, args=[container, timeout])
    readycheck_thread.daemon = True
    readycheck_thread.start()

    # Wait for the health thread to finish.
    # FIX: reuse the timeout computed above instead of calling
    # getReadyCheckTimeout() a second time.
    readycheck_thread.join(timeout)

    # If the thread is still alive, then our join timed out.
    # FIX: Thread.isAlive() was removed in Python 3.9; is_alive() is the
    # supported spelling.
    if readycheck_thread.is_alive():
        report("Timed out waiting for health checks. Stopping container...", component=self)
        client.stop(container)
        report("Container stopped", component=self)
        return None

    # Otherwise, the container is ready. Set it as starting.
    setContainerComponent(container, self.getName())
    setContainerStatus(container, "starting")
    return container
def stop(self, kill=False):
    """ Stops all containers for this component: deregisters them from the
    ELB, marks them draining, and asks the manager to terminate each one.
    When `kill` is True, the containers are also hard-killed. """
    if not self.isRunning():
        return

    self.logger.debug('Stopping component %s', self.getName())
    docker_client = getDockerClient()

    # Pull everything out of rotation and hand each container to the
    # manager for termination.
    report('Draining all containers...', component=self)
    # NOTE(review): sibling revisions of this method use `self.elb_manager`;
    # confirm which attribute name the class actually defines.
    self.elbManager.deregisterAllContainers()
    for running in self.getAllContainers(docker_client):
        setContainerStatus(running, 'draining')
        self.manager.terminateContainer(running, self)

    # Hard-kill whatever remains, if requested.
    if kill:
        for running in self.getAllContainers(docker_client):
            report('Killing container ' + running['Id'][:12], component=self)
            docker_client.kill(running)
            removeContainerMetadata(running)
def stop(self, kill=False):
    """ Stops all containers for this component: deregisters them from the
    ELB, marks them draining, and asks the manager to terminate each one.
    When `kill` is True, the containers are also hard-killed. """
    if not self.isRunning():
        return

    self.logger.debug("Stopping component %s", self.getName())
    docker_client = getDockerClient()

    # Take every container out of rotation and hand it to the manager for
    # termination.
    report("Draining all containers...", component=self)
    self.elb_manager.deregisterAllContainers()
    for running in self.getAllContainers(docker_client):
        setContainerStatus(running, "draining")
        self.manager.terminateContainer(running, self)

    # Hard-kill whatever remains, if requested.
    if kill:
        for running in self.getAllContainers(docker_client):
            report("Killing container " + running["Id"][:12], component=self)
            docker_client.kill(running)
            removeContainerMetadata(running)
def stop(self, kill=False):
    """ Stops all containers for this component by marking them as draining
    and rebuilding the proxy routes. When `kill` is True, the containers are
    also hard-killed before the proxy is adjusted. """
    if not self.isRunning():
        return

    self.logger.debug('Stopping component %s', self.getName())
    docker_client = getDockerClient()

    # Every container for this component is now on its way out.
    report('Draining all containers...', component=self)
    for running in self.getAllContainers(docker_client):
        setContainerStatus(running, 'draining')

    # Hard-kill the containers too, if requested.
    if kill:
        for running in self.getAllContainers(docker_client):
            report('Killing container ' + running['Id'][0:12], component=self)
            docker_client.kill(running)
            removeContainerMetadata(running)

    # Clear the proxy and rebuild its routes for the running components.
    self.manager.adjustForStoppingComponent(self)
def start(self):
    """ Starts a new instance of the component. Note that this does *not*
    update the proxy.

    Returns the container on success, or None if the ready checks did not pass
    within the configured timeout (the container is stopped in that case).
    """
    client = getDockerClient()
    self.logger.debug('Starting container for component %s', self.getName())

    # Ensure that we have the image. If not, we try to download it.
    self.ensureImage(client)

    # Start the instance with the proper image ID.
    container = self.createContainer(client)
    report('Starting container ' + container['Id'], component=self)
    client.start(container)

    # Health check until the instance is ready.
    report('Waiting for health checks...', component=self)

    # Start a health check thread to determine when the component is ready.
    timeout = self.config.getReadyCheckTimeout()
    readycheck_thread = Thread(target=self.readyCheck, args=[container, timeout])
    readycheck_thread.daemon = True
    readycheck_thread.start()

    # Wait for the health thread to finish.
    # FIX: reuse the timeout computed above instead of calling
    # getReadyCheckTimeout() a second time.
    readycheck_thread.join(timeout)

    # If the thread is still alive, then our join timed out.
    # FIX: Thread.isAlive() was removed in Python 3.9; is_alive() is the
    # supported spelling.
    if readycheck_thread.is_alive():
        report('Timed out waiting for health checks. Stopping container...', component=self)
        client.stop(container)
        report('Container stopped', component=self)
        return None

    # Otherwise, the container is ready. Set it as starting.
    setContainerComponent(container, self.getName())
    setContainerStatus(container, 'starting')
    return container