def environment_definitions(directory=None, env_name="Ignored", property_file=None, noop=False):
    """Load the environment definitions for *env_name* from *directory*.

    Reads environment_definitions.yaml from *directory*, templates it with the
    properties found in *property_file*, and builds the definitions using the
    service definitions and credentials discovered alongside it.

    Args:
        directory: path containing the definition YAML files.
        env_name: name of the environment to build; must be non-empty.
        property_file: optional properties file used to template the YAML.
        noop: passed through to the YAML loader (dry-run flag).

    Raises:
        ValueError: if env_name is empty or None.  (ValueError replaces the
            Py2-only StandardError, which is a NameError on Python 3; on Py2
            ValueError subclasses StandardError, so existing handlers still match.)
    """
    logger.debug("Using property file %s" % property_file)
    if not env_name:
        raise ValueError("env_name is a required field")
    return _definition_from_yaml(
        directory,
        "environment_definitions.yaml",
        partial(environment_definitions_from_yaml,
                service_definitions=service_defs_from_dir(directory),
                env_name=env_name,
                noop=noop,
                all_credentials=_credentials_from_path(directory)),
        property_file)
def _bootstrap_puppet(self):
    """Ensure the puppet package is installed on this node, installing it if absent."""
    puppet_status = self.node.run_command(
        "dpkg-query -W -f='${Status} ${Version}\n' puppet", warn_only=True)
    # dpkg reports "install ok installed" when the package is present, but
    # "unknown ok not-installed" when it is not -- a bare substring test for
    # "installed" matches both (false positive on "not-installed"), so check
    # the full status phrase instead.
    if "install ok installed" in puppet_status:
        logger.debug("Puppet already bootstrapped")
    else:
        logger.debug("Bootstrapping puppet")
        self.node.run_command('sudo apt-get update && sudo apt-get install puppet -y')
def _definition_from_yaml(def_directory, filename, from_yaml_func, property_file=None):
    """Read *filename* from *def_directory*, template it, and parse it.

    The file's contents are rendered as a pystache (mustache) template using
    the properties loaded from *property_file*, and the resulting YAML text is
    handed to *from_yaml_func*, whose return value is returned.

    Args:
        def_directory: directory containing the definition file.
        filename: name of the YAML file to load.
        from_yaml_func: callable taking the templated YAML string.
        property_file: optional properties file supplying template values.
    """
    conf_path = _conf_file_from_dir(def_directory, filename)
    # 'with' guarantees the handle is closed on every path, replacing the
    # original manual try/finally around open()/close().
    with open(conf_path, 'r') as config_file:
        properties_from_file = _properties_from_file(property_file)
        logger.debug("Loading properties %s" % properties_from_file)
        templated_yaml = pystache.render(config_file.read(), properties_from_file)
        logger.debug("Post templated YAML %s" % templated_yaml)
        return from_yaml_func(templated_yaml)
def wait_for_ready(self, callback, start_up_timeout=90):
    """Poll the backing instance until port 22 is reachable, then run *callback*.

    Re-checks the instance every 5 seconds until it is in the 'running' state,
    has a public IP, and accepts a connection on port 22, giving up after
    *start_up_timeout* seconds.

    Raises:
        Exception: if the node is not reachable within the timeout.
    """
    logger.info("Waiting for node %s to be ready" % self.id())
    deadline = time.time() + start_up_timeout
    reachable = False
    while time.time() <= deadline:
        self.boto_instance.update()
        ip = self.boto_instance.ip_address
        if self.state() == 'running' and ip is not None:
            reachable = self.connection_provider.connected_to_node(ip, 22)
        if not reachable:
            logger.debug("Waiting for 5 seconds for node %s" % self.id())
            sleep(5)
            continue
        logger.info("*********Node %s is ready!********" % self.id())
        break
    if not reachable:
        raise Exception("Node %s is not running" % self.id())
    # The Ubuntu images we use can miss transitive package dependencies if we
    # install too soon after boot, so give the machine a moment to settle.
    sleep(10)
    callback()
def launch(self):
    """Converge running nodes to the configured node definitions.

    Computes the delta between the desired node definitions and what is
    already running, provisions the missing nodes, shuts down the surplus
    ones, then tags and configures the full running set.
    """
    node_defs_to_provision, services_to_already_launched_nodes, running_nodes_to_terminate = \
        self.delta_defs_with_running_nodes(self.node_definitions)
    # AWS Problems:
    # 1. Shutting down instances in a different environment
    # 2. Not matching existing instances
    # TODO - Should log gracefully during lifecycle events....
    logger.info("Shutting down instances %s" % [n.id() for n in running_nodes_to_terminate])
    logger.info("Launching new instances %s" % node_defs_to_provision)
    services_to_newly_launched_nodes = self._provision_nodes(node_defs_to_provision, blocking=True)
    # Plain loop instead of map(): map was used purely for its side effects,
    # and on Python 3 map is lazy, so the shutdowns would never execute.
    for node in running_nodes_to_terminate:
        self.node_provider.shutdown(node.id())
    services_to_all_running_nodes = self.merge_service_to_nodes_dicts(
        services_to_already_launched_nodes, services_to_newly_launched_nodes)
    self.tag_nodes_with_services(services_to_all_running_nodes)
    env_settings = self.build_environment_settings(services_to_all_running_nodes)
    logger.debug("settings: %s" % env_settings)
    self.configure_services(services_to_all_running_nodes, env_settings)
def run_command(self, command, warn_only=False):
    """Execute *command* on this node over SSH and return fabric's result.

    With warn_only=True a non-zero exit status is reported as a warning
    rather than aborting the run.
    """
    # Single combined 'with' over fabric's settings() plus the node context.
    with settings(warn_only=warn_only), self._running_node():
        logger.debug("Running on node %s with user %s and key %s" %
                     (self._dns_name(), self._admin_user(), self._path_to_private_key()))
        return run(command)
def _upload_artifacts(self):
    """Bundle this service's puppet module, copy it to the node, and unpack it in /tmp."""
    logger.debug('Uploading artifacts')
    bundle_name = "%s_puppet_bundle.tgz" % self.service_definition.name
    local_tarball = self.service_definition.bundle("puppet_module_directory", bundle_name)
    self.node.upload_file(local_tarball, "/tmp")
    remote_tarball = "/tmp/%s" % os.path.basename(local_tarball)
    self.node.run_command('tar xvfz %s' % remote_tarball)