def matches_definition(self, node_definition):
    logger.info("Matching %s against definition %s" % (self, node_definition))
    return self.boto_instance.image_id == node_definition.ami_id and \
        self.boto_instance.instance_type == node_definition.size and \
        self._tag('credentials_name') == node_definition.credentials_name and \
        self.boto_instance.region.name == node_definition.region and \
        self.get_services().keys() == node_definition.services
def service_terminated(self, service_name, node):
    conn = self.get_connection_for_region(node.region().name)
    try:
        load_balancer = conn.get_all_load_balancers(load_balancer_names=[self.elb_name])[0]
        logger.info('Existing load balancer found with name %s' % self.elb_name)
        load_balancer.deregister_instances([node.id()])
    except:
        # No load balancer with this name exists, so there is nothing to deregister from
        pass
def launch(env_template=None, env_name=None, config_dir=DEFAULT_ENVIRONMENT, noop=False, property_file=None):
    """Launches a new environment, or applies changes made to an existing environment"""
    with env_conf_from_dir(config_dir, env_name, property_file, noop=noop) as env_defs:
        if noop:
            logger.info("Running in NOOP mode - no changes will be made to your system")

        environment_definition = env_defs[env_template]
        environment_definition.launch()
        if noop:
            print environment_definition.node_provider.noop_actions_string()
        else:
            print describe_running_environment(config_dir, environment_definition, env_name, env_template, TextTableEnvironmentDescriber())

        return environment_definition
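# Hypothetical usage sketch (not part of the original snippet): previewing an environment
# launch in noop mode before applying it for real. The template, environment and property
# file names below are illustrative assumptions.
#
#   launch(env_template='web_cluster', env_name='staging', noop=True,
#          property_file='credentials.ini')
#   launch(env_template='web_cluster', env_name='staging',
#          property_file='credentials.ini')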
    def create_security_group_if_it_does_not_exists(self, service_name):
        """
        Creates a security group for the given service name if one does not already exist.
        Returns the fully qualified name of the security group.
        """
        security_group_name = self._get_sec_group_name(service_name)

        sec_groups = self.connection.get_all_security_groups()
        if not any(x.name == security_group_name for x in sec_groups):
            logger.info("Creating new Security Group %s" % security_group_name)
            self.connection.create_security_group(security_group_name, "dynamically created security group")
        else:
            logger.info("Security Group %s already exists" % security_group_name)

        return security_group_name
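    # Hypothetical usage sketch: 'provider' stands in for an instance of the class that owns
    # this method, backed by a boto EC2 connection. The service name is an illustrative
    # assumption.
    #
    #   group_name = provider.create_security_group_if_it_does_not_exists('web_service')
    #   logger.info("Instances for this service will be launched into %s" % group_name)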
    def wait_for_ready(self, callback, start_up_timeout=90):
        logger.info("Waiting for node %s to be ready" % self.id())
        start = time.time()
        node_is_up = False
        while time.time() - start <= start_up_timeout:
            self.boto_instance.update()
            if self.state() == 'running' and self.boto_instance.ip_address is not None:
                node_is_up = self.connection_provider.connected_to_node(self.boto_instance.ip_address, 22)
            if node_is_up:
                logger.info("*********Node %s is ready!********" % self.id())
                break
            logger.debug("Waiting for 5 seconds for node %s" % self.id())
            sleep(5)

        if not node_is_up:
            raise Exception("Node %s is not running" % self.id())

        # For some reason, with the Ubuntu instances we use, if we try to install packages too quickly
        # after the machine boots, we don't get the transitive dependencies - highly annoying.
        sleep(10)
        callback()
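    # Hypothetical usage sketch: the callback only fires once SSH on port 22 is reachable,
    # so it is a natural hook for post-boot provisioning. The node and provisioning function
    # names here are illustrative assumptions.
    #
    #   node.wait_for_ready(lambda: provision_packages(node), start_up_timeout=120)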
    def launch(self):
        node_defs_to_provision, services_to_already_launched_nodes, running_nodes_to_terminate = self.delta_defs_with_running_nodes(
            self.node_definitions)

        # AWS Problems:
        #  1. Shutting down instances in a different environment
        #  2. Not matching existing instances
        # TODO - Should log gracefully during lifecycle events....
        logger.info("Shutting down instances %s" % [n.id() for n in running_nodes_to_terminate])
        logger.info("Launching new instances %s" % node_defs_to_provision)

        services_to_newly_launched_nodes = self._provision_nodes(node_defs_to_provision, blocking=True)
        map(lambda n: self.node_provider.shutdown(n.id()), running_nodes_to_terminate)

        services_to_all_running_nodes = self.merge_service_to_nodes_dicts(services_to_already_launched_nodes,
            services_to_newly_launched_nodes)

        self.tag_nodes_with_services(services_to_all_running_nodes)

        env_settings = self.build_environment_settings(services_to_all_running_nodes)
        logger.debug("settings: %s" % env_settings)
        self.configure_services(services_to_all_running_nodes, env_settings)
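    # A minimal sketch of what merge_service_to_nodes_dicts might do, assuming both arguments
    # map service names to lists of nodes; the real helper is not shown in this snippet, so
    # treat this as an illustrative assumption rather than the actual implementation.
    #
    #   def merge_service_to_nodes_dicts(self, first, second):
    #       merged = {}
    #       for service_to_nodes in (first, second):
    #           for service, nodes in service_to_nodes.items():
    #               merged.setdefault(service, []).extend(nodes)
    #       return merged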
def wait_for_ready(self, callback, start_up_timeout=45):
    logger.info("Waiting for node %s to be ready" % self.id())
    start = time.time()
    succeeded = False
    while time.time() - start <= start_up_timeout:
        try:
            succeeded = self.ssh_command_helper.run_command_silently("ping -c1 %s" % self.node_id).succeeded
        except:
            # The node may not be resolvable or reachable yet, so ignore failures and retry
            pass
        if succeeded:
            logger.info("*********Node %s is ready!*********" % self.id())
            break
        else:
            logger.info("Node %s not yet ready, checking again in 3 seconds" % self.id())
            time.sleep(3)
    if not succeeded:
        raise Exception("Node %s is not running" % self.node_id)
    callback()
    def service_installed(self, service_name, node, connectivities):
        conn = self.get_connection_for_region(node.region().name)
        try:
            load_balancer = conn.get_all_load_balancers(load_balancer_names=[self.elb_name])[0]
            logger.info('Existing load balancer found with name %s' % self.elb_name)
        except:
            # No load balancer with this name exists yet; one will be created below
            load_balancer = None

        if load_balancer is None:
            logger.info('Creating a new load balancer: ' + self.elb_name)
            elb_ports = self.get_elb_mappings(connectivities)
            load_balancer = conn.create_load_balancer(self.elb_name, str(node.placement()), elb_ports)

        health_check_address = self.app_healthcheck_target
        hc = HealthCheck(
            interval=20,
            healthy_threshold=3,
            unhealthy_threshold=5,
            target=health_check_address
        )
        load_balancer.configure_health_check(hc)
        logger.info('Registering node id %s for availability zone %s' % (node.id(), node.placement()))
        load_balancer.enable_zones([node.placement()])
        load_balancer.register_instances([node.id()])
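# Hypothetical configuration sketch: boto expects an ELB health check target of the form
# 'PROTOCOL:PORT[/path]', so app_healthcheck_target would typically hold a value such as
# the one below. The owning object, port and path are illustrative assumptions.
#
#   balancer_config.app_healthcheck_target = 'HTTP:8080/ping'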
def run_command(self, command, warn_only=False):
    with settings(warn_only=warn_only):
        logger.info("Running on node %s" % str(self))
        return self.ssh_command_helper.run_command("sudo ssh -i /root/.ssh/id_rsa -oStrictHostKeyChecking=no root@%s '%s'" % (self.node_id, command))
def tearDown(self):
    for env_template, env_name in self.env_to_shut_down.items():
        logger.info("Terminating environment: template %s, name %s" % (env_template, env_name))
        logger.info(commands.getoutput("cd phoenix && ./pho terminate_environment --env_template %s --env_name %s --property_file %s" % (env_template, env_name, "../build_credentials/phoenix.ini")))
    self.env_to_shut_down.clear()