Example #1
  def __init__(self, config):
    # Logging.
    self.logger = logging.getLogger(__name__)

    # The overall configuration.
    self.config = config

    # The proxy being used to talk to HAProxy.
    self.proxy = Proxy()

    # The components, by name.
    self.components = {}

    # Build the components map.
    for component_config in config.components:
      self.components[component_config.name] = Component(self, component_config)

    # Create the lock for the watcher thread and the notification event.
    self.watcher_lock = threading.Lock()
    self.watcher_event = threading.Event()

    # The set of containers which should be terminated by the terminating workers.
    self.containers_to_terminate = Queue()

    # Worker pool used to watch and stop containers that are no longer needed.
    self.pool = ThreadPool()

    # Place to collect the results of the monitor workers.
    self.monitor_futures = Queue()
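
A minimal sketch of how this constructor might be driven, assuming the surrounding RuntimeManager class and its dependencies are importable; the config shape is inferred from the loop over config.components, and all names and values below are illustrative:

# Hypothetical config objects exposing only the attributes the constructor reads.
from collections import namedtuple

ComponentConfig = namedtuple('ComponentConfig', ['name'])
Config = namedtuple('Config', ['components'])

config = Config(components=[ComponentConfig(name='web'),
                            ComponentConfig(name='db')])
manager = RuntimeManager(config)
print(sorted(manager.components))  # ['db', 'web']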
Example #2
  def __init__(self, config, daemon_mode=False):
    # Logging.
    self.logger = logging.getLogger(__name__)

    # Whether gantry is running in daemon mode.
    self.daemon_mode = daemon_mode
    
    # The overall configuration.
    self.config = config
    
    # The proxy being used to talk to HAProxy.
    self.proxy = Proxy()
    
    # The components, by name.
    self.components = {}
    
    # Build the components map.
    for component_config in config.components:
      self.components[component_config.name] = Component(self, component_config)
    
    # Create the lock for the watcher thread and the notification event.
    self.watcher_lock = threading.Lock()
    self.watcher_event = threading.Event()

    # The set of containers watched. Must be accessed under the watcher_lock.
    # When the connections to the ports used by a container are no longer there,
    # the container is stopped.
    self.containers_watched = []
    
    # Start the thread used to watch and stop containers that are no longer needed.
    self.watcher_thread = threading.Thread(target=self.checkProxy)
    self.watcher_thread.daemon = True
    self.watcher_thread.start()
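
The watcher wiring above follows a common pattern: a daemon thread blocks on a threading.Event, and the shared watch list is only touched under the lock. A small self-contained sketch of that pattern (all names here are illustrative, not from the original code):

import threading
import time

lock = threading.Lock()
event = threading.Event()
watched = []

def watcher():
  while True:
    event.wait()       # block until new work is signalled
    event.clear()
    with lock:         # snapshot the shared list under the lock
      items = list(watched)
    print('watching %d container(s)' % len(items))

thread = threading.Thread(target=watcher)
thread.daemon = True   # daemon threads do not keep the process alive at exit
thread.start()

with lock:
  watched.append('container-1234')
event.set()            # wake the watcher
time.sleep(0.1)        # give the daemon thread a moment to run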
Example #3
  def run(self, container, report):
    container_ip = self.getContainerIPAddress(container)

    for connection in Proxy.get_connections():
      if not connection.laddr or not connection.raddr:
        continue

      if connection.raddr[0] == container_ip:
        report('Container still has existing connections: %s' % container['Id'][0:12],
               level=ReportLevels.EXTRA)
        return False

    report('Container has no remaining connections: %s' % container['Id'][0:12],
           level=ReportLevels.EXTRA)
    return True
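
Proxy.get_connections() is not shown here, but the connection objects it yields expose .laddr/.raddr pairs that index like (ip, port), which matches psutil's connection API. A hedged sketch of the same check written directly against psutil (that the original Proxy wraps psutil is an assumption; psutil must be installed):

import psutil

def has_remaining_connections(container_ip):
  # Scan system-wide TCP connections for a remote endpoint inside the container.
  for conn in psutil.net_connections(kind='tcp'):
    if not conn.laddr or not conn.raddr:
      continue  # e.g. listening sockets have no remote endpoint
    if conn.raddr[0] == container_ip:
      return True
  return False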
Example #4
class RuntimeManager(object):
  """ Manager class which handles tracking of all the components and other runtime
      information.
  """
  def __init__(self, config, daemon_mode=False):
    # Logging.
    self.logger = logging.getLogger(__name__)

    # Whether gantry is running in daemon mode.
    self.daemon_mode = daemon_mode
    
    # The overall configuration.
    self.config = config
    
    # The proxy being used to talk to HAProxy.
    self.proxy = Proxy()
    
    # The components, by name.
    self.components = {}
    
    # Build the components map.
    for component_config in config.components:
      self.components[component_config.name] = Component(self, component_config)
    
    # Create the lock for the watcher thread and the notification event.
    self.watcher_lock = threading.Lock()
    self.watcher_event = threading.Event()

    # The set of containers watched. Must be accessed under the watcher_lock.
    # When the connections to the ports used by a container are no longer there,
    # the container is stopped.
    self.containers_watched = []
    
    # Start the thread used to watch and stop containers that are no longer needed.
    self.watcher_thread = threading.Thread(target=self.checkProxy)
    self.watcher_thread.daemon = True
    self.watcher_thread.start()
    
  def getComponent(self, name):
    """ Returns the component with the given name defined or None if none. """
    if not name in self.components:
      return None
      
    return self.components[name]

  def adjustForUpdatingComponent(self, component, started_container):
    """ Adjusts the runtime for a component which has been started in the given
        container.
    """
    self.logger.debug('Adjusting runtime for updating component: %s', component.getName())
    self.updateProxy()
   
  def adjustForStoppingComponent(self, component):
    """ Adjusts the runtime for a component which has been stopped.
    """
    self.logger.debug('Adjusting runtime for stopped component: %s', component.getName())
    self.updateProxy()
    
  def findConnectionLessContainers(self, containers):
    """ Determines which containers no longer have any valid connections to the
        outside world.
    """
    # Build the set of active connections from all the running proxy processes.
    connections = self.proxy.get_connections()
    
    # Build the set of container-local ports used.
    ports = set()
    for connection in connections:
      laddr = connection.laddr
      raddr = connection.raddr
      if not laddr or not raddr:
        continue
      
      laddress = laddr[0]
      raddress = raddr[0]
      rport = raddr[1]

      if laddress == '127.0.0.1' and raddress == '127.0.0.1':
        ports.add(rport)

    # For each draining container, if the port set contains one of the known mappings, then
    # the container is still being used.
    connectionless = list(containers)
    for container in containers:
      if getContainerStatus(container) == 'draining':
        container_local_ports = containerutil.getLocalPorts(container)
        if ports.intersection(container_local_ports):
          connectionless.remove(container)

    return connectionless
        
  def checkProxy(self):
    """ Checks to see if a draining container can be shutdown. """
    client = getDockerClient()
    while True:
      # Wait until something of interest is available to check.
      self.watcher_event.wait()
      self.watcher_event.clear()
      
      while True:
        # Load the containers to check (under the lock).
        containers = None
        with self.watcher_lock:
          containers = list(self.containers_watched)
        
        # If none, we're done.
        if not containers:
          break
        
        # Find the containers that no longer need to be running. Any container with no
        # valid incoming connections and a status of 'draining' can be shut down.
        report('Monitor check started', level=ReportLevels.BACKGROUND)
        containers_to_shutdown = self.findConnectionLessContainers(containers)
        if containers_to_shutdown:
          with self.watcher_lock:
            for container in containers_to_shutdown:
              self.containers_watched.remove(container)

          for container in containers_to_shutdown:
            setContainerStatus(container, 'shutting-down')
            report('Shutting down container: ' + container['Id'][0:12],
                   level=ReportLevels.BACKGROUND)
            client.stop(container)
            removeContainerMetadata(container)

        # Determine how many residual containers are left over.
        difference = len(containers) - len(containers_to_shutdown)
        if difference > 0:
          report(str(difference) + ' additional containers to monitor. Sleeping for 10 seconds',
                 level=ReportLevels.BACKGROUND)
          time.sleep(10)
      
      report('Monitor check finished', level=ReportLevels.BACKGROUND)
      if not self.daemon_mode:
        # Quit now that we're done.
        return
    
  def updateProxy(self):
    """ Updates the proxy used for port mapping to conform to the current running container
        list.
    """
    client = getDockerClient()
    
    # Clear all routes in the proxy.
    # TODO: When this is in daemon mode, we don't need to do this; we could
    # selectively edit the routes instead.
    self.proxy.clear_routes()
    
    # Add routes for the non-draining containers and collect the draining containers to
    # watch.
    report('Finding running containers...', level=ReportLevels.EXTRA)
    draining_containers = []
    starting_containers = []
    
    for component in self.components.values():
      for container in component.getAllContainers(client):
        if getContainerStatus(container) != 'draining':
          starting_containers.append(container)
          for mapping in component.config.ports:
            local_port = containerutil.getLocalPort(client, container, mapping.container)
            route = Route(mapping.kind == 'http', mapping.external, 'localhost', local_port)
            self.proxy.add_route(route)
        else:
          draining_containers.append(container)

    # Commit the changes to the proxy.
    if draining_containers or starting_containers:
      report('Updating proxy...', level=ReportLevels.EXTRA)
      self.proxy.commit()
    else:
      report('Shutting down proxy...', level=ReportLevels.EXTRA)
      self.proxy.shutdown()
    
    # Mark the starting containers as running.
    for container in starting_containers:
      setContainerStatus(container, 'running')

    if draining_containers:
      report('Starting monitoring...', level=ReportLevels.EXTRA)
    
    # If there are any draining containers, add them to the watcher thread.
    with self.watcher_lock:
      self.containers_watched.extend(draining_containers)
    
    # Call the event to wakeup the watcher thread.
    if draining_containers:
      self.watcher_event.set()    
    
    # If in local mode, then wait until all the containers have drained. This
    # prevents the python debugger from shutting down, since the other threads
    # are all daemon threads.
    if not self.daemon_mode and draining_containers:
      while True:
        self.watcher_thread.join(10)
        if not self.watcher_thread.is_alive():
          break
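
For reference, the proxy half of updateProxy above boils down to a clear/add/commit cycle against the Route signature used in the loop, Route(is_http, source_port, host, target_port). A condensed sketch with illustrative port values:

proxy = Proxy()
proxy.clear_routes()

# One route per exposed port mapping: HTTP traffic arriving on external
# port 80 is forwarded to the container-local port (49153 is illustrative).
proxy.add_route(Route(True, 80, 'localhost', 49153))

proxy.commit()     # push the new routing table to HAProxy
# ...and once no containers remain at all:
proxy.shutdown()   # stop HAProxy entirely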
Example #5
class RuntimeManager(object):
  """ Manager class which handles tracking of all the components and other runtime
      information.
  """
  def __init__(self, config):
    # Logging.
    self.logger = logging.getLogger(__name__)

    # The overall configuration.
    self.config = config

    # The proxy being used to talk to HAProxy.
    self.proxy = Proxy()

    # The components, by name.
    self.components = {}

    # Build the components map.
    for component_config in config.components:
      self.components[component_config.name] = Component(self, component_config)

    # Create the lock for the watcher thread and the notification event.
    self.watcher_lock = threading.Lock()
    self.watcher_event = threading.Event()

    # The set of containers which should be terminated by the terminating workers.
    self.containers_to_terminate = Queue()

    # Worker pool used to watch and stop containers that are no longer needed.
    self.pool = ThreadPool()

    # Place to collect the results of the monitor workers.
    self.monitor_futures = Queue()

  def getComponent(self, name):
    """ Returns the component with the given name defined or None if none. """
    if not name in self.components:
      return None

    return self.components[name]

  def lookupComponentLink(self, link_name):
    """ Looks up the component link with the given name defined or None if none. """
    for component_name, component in self.components.items():
      defined_links = component.config.getDefinedComponentLinks()
      if link_name in defined_links:
        return ComponentLinkInformation(self, component, defined_links[link_name])

    return None

  def adjustForUpdatingComponent(self, component, started_container):
    """ Adjusts the runtime for a component which has been started in the given
        container.
    """
    self.logger.debug('Adjusting runtime for updating component: %s', component.getName())
    self.updateProxy()

  def adjustForStoppingComponent(self, component):
    """ Adjusts the runtime for a component which has been stopped.
    """
    self.logger.debug('Adjusting runtime for stopped component: %s', component.getName())
    self.updateProxy()

  def watchTermination(self, container, component):
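    """ Sends the component's termination signals to the container, waits for all of
        its termination checks to pass, and then stops the container and removes its
        metadata. """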
    report('Monitor check started', level=ReportLevels.BACKGROUND)

    client = getDockerClient()

    # Send the termination signal(s) to the container
    signals = []

    for signal in component.config.termination_signals:
      signals.append((signal, buildTerminationSignal(signal)))

    report('Sending %s termination signals' % len(signals), component=component)

    for (config, signal) in signals:
      report('Sending termination signal: ' + config.getTitle(), component=component)
      result = signal.run(container, report)
      if not result:
        report('Termination signal failed', component=component)

    # Now wait until all of the termination conditions are met
    checks = []
    for check in component.config.termination_checks:
      checks.append((check, buildHealthCheck(check)))

    report('Waiting for %s termination checks' % len(checks), component=component)

    for (config, check) in checks:
      check_passed = False

      while not check_passed:
        report('Running termination check: ' + config.getTitle(), component=component)
        result = check.run(container, report)
        if not result:
          report('Termination check failed', component=component)

          report('Sleeping ' + str(config.timeout) + ' second(s)...', component=component)
          time.sleep(config.timeout)
        else:
          check_passed = True

    report('Monitor check finished', level=ReportLevels.BACKGROUND)

    setContainerStatus(container, 'shutting-down')
    report('Shutting down container: ' + container['Id'][0:12], level=ReportLevels.BACKGROUND)
    client.stop(container)
    removeContainerMetadata(container)

  def terminateContainer(self, container, component):
    """ Adds the given container to the list of containers which should be terminated.
    """
    report('Terminating container: %s' % container['Id'][:12], component=component)
    self.monitor_futures.put(self.pool.apply_async(self.watchTermination, (container, component)))

  def updateProxy(self):
    """ Updates the proxy used for port mapping to conform to the current running container
        list.
    """
    client = getDockerClient()

    # Clear all routes in the proxy.
    # TODO: When this is in daemon mode, we don't need to do this; we could
    # selectively edit the routes instead.
    self.proxy.clear_routes()

    # Add routes for the non-draining containers and collect the draining containers to
    # watch.
    report('Finding running containers...', level=ReportLevels.EXTRA)
    draining_containers = []
    starting_containers = []

    for component in self.components.values():
      for container in component.getAllContainers(client):
        if getContainerStatus(container) != 'draining':
          container_ip = containerutil.getContainerIPAddress(client, container)
          starting_containers.append(container)

          # Add the normal exposed ports.
          for mapping in component.config.ports:
            route = Route(mapping.kind == 'http', mapping.external, container_ip,
                          mapping.container)
            self.proxy.add_route(route)

          # Add the container link ports.
          for link in component.config.defined_component_links:
            route = Route(link.kind == 'http', link.getHostPort(), container_ip, link.port)
            self.proxy.add_route(route)
        else:
          draining_containers.append(container)

    # Commit the changes to the proxy.
    if draining_containers or starting_containers:
      report('Updating proxy...', level=ReportLevels.EXTRA)
      self.proxy.commit()
    else:
      report('Shutting down proxy...', level=ReportLevels.EXTRA)
      self.proxy.shutdown()

    # Mark the starting containers as running.
    for container in starting_containers:
      setContainerStatus(container, 'running')

  def join(self):
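    """ Closes the worker pool and waits for all scheduled terminations to finish,
        re-raising any exception raised inside a termination worker. """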
    self.pool.close()

    while not self.monitor_futures.empty():
      # If any of the futures threw an exception, we'll get it now.
      self.monitor_futures.get().get()

    self.pool.join()
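
The terminateContainer/join pair uses a thread pool plus a queue of async results as lightweight futures: apply_async returns an AsyncResult whose .get() re-raises any exception from the worker. A self-contained sketch of that pattern (the Python 2/3 Queue import is an assumption about the target runtime; the worker body is illustrative):

from multiprocessing.pool import ThreadPool
try:
  from Queue import Queue    # Python 2
except ImportError:
  from queue import Queue    # Python 3

pool = ThreadPool()
futures = Queue()

def terminate(name):
  # Stand-in for watchTermination: any exception raised here surfaces in .get().
  return 'stopped %s' % name

futures.put(pool.apply_async(terminate, ('container-1234',)))

pool.close()
while not futures.empty():
  print(futures.get().get())   # re-raises worker exceptions, like join() above
pool.join()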