def list_ips(node_name=None): if not settings.operation_profile: settings.operation_profile = 'master_profile' salt_master = get_profile(settings.operation_profile) data = expose_machines(salt_master) for i in data: try: print "%s <==> %s" % (i.name, i.public_ips[0]) except IndexError as e: print "%s <==> %s" % (i.name, i.public_ips)
def list_nodes(silent=False):
    """Log every online instance known to the salt master.

    Falls back to the default ``'master_profile'`` when no operation profile
    is configured in ``settings``.

    :param silent: when True, fetch the machine list but emit no log output.
    """
    if not settings.operation_profile:
        settings.operation_profile = 'master_profile'
    salt_master = get_profile(settings.operation_profile)
    # Fetch the machine list exactly once (the original queried the master
    # twice and discarded the first result).
    inst_list = expose_machines(salt_master)
    if not silent:
        logger.info("Online instances:")
        for inst in inst_list:
            logger.info("Instance: %s IP: %s State: %s uuid: %s"
                        % (inst.name, inst.public_ips, inst.state, inst.uuid))
def list_nodes(silent=False):
    """Log every online instance known to the salt master.

    Falls back to the default ``'master_profile'`` when no operation profile
    is configured in ``settings``.

    :param silent: when True, fetch the machine list but emit no log output.
    """
    if not settings.operation_profile:
        settings.operation_profile = 'master_profile'
    salt_master = get_profile(settings.operation_profile)
    # Fetch the machine list exactly once (the original queried the master
    # twice and discarded the first result).
    inst_list = expose_machines(salt_master)
    if not silent:
        logger.info("Online instances:")
        for inst in inst_list:
            logger.info(
                "Instance: %s IP: %s State: %s uuid: %s"
                % (inst.name, inst.public_ips, inst.state, inst.uuid))
def create(instance_name=None):
    """Spin up new instances. Display all running instances.

    raises:
        profiles.exceptions.MissingProfileNameError
        profiles.exceptions.DriverNotSupportedError
    """
    if settings.operation_profile_list:
        # BUG FIX: the names were previously joined into a single
        # comma-separated string and then iterated, which logged one
        # CHARACTER per line. Iterate the profile names themselves.
        for p in SEED_PROFILES.keys():
            logger.info("Profile: %s" % p)
        sys.exit()
    if settings.operation_profile:
        seed_profile = get_profile(settings.operation_profile)
    else:
        # NOTE(review): default here is "salt_master" while the list
        # functions default to 'master_profile' -- confirm intentional.
        settings.operation_profile = "salt_master"
        seed_profile = aws.master_profile.copy()
        logger.info("Using default profile: %s " % settings.operation_profile)
    if not seed_profile:
        raise SeedProfileDoesNotExistError("%s" % settings.operation_profile)
    if settings.map_list:
        parse_map_file(settings.map_list)
    else:
        logger.info(
            "map_file not specified. using existing salt_cloud_map file from resources."
        )
    logger.info("Profile being used: %s" % settings.operation_profile)
    # Explicit argument wins over the name configured in settings.
    seed_profile.name = instance_name or settings.name
    if settings.vpc_subnet:
        instance_id = seed_vpc(seed_profile, env=settings.vpc_subnet[0])
    else:
        instance_id = seed_machine(seed_profile)
    water_machines(seed_profile, [instance_id])
def create(instance_name=None):
    """Spin up new instances. Display all running instances.

    raises:
        profiles.exceptions.MissingProfileNameError
        profiles.exceptions.DriverNotSupportedError
    """
    if settings.operation_profile_list:
        # BUG FIX: the names were previously joined into a single
        # comma-separated string and then iterated, which logged one
        # CHARACTER per line. Iterate the profile names themselves.
        for p in SEED_PROFILES.keys():
            logger.info("Profile: %s" % p)
        sys.exit()
    if settings.operation_profile:
        seed_profile = get_profile(settings.operation_profile)
    else:
        # NOTE(review): default here is "salt_master" while the list
        # functions default to 'master_profile' -- confirm intentional.
        settings.operation_profile = "salt_master"
        seed_profile = aws.master_profile.copy()
        logger.info("Using default profile: %s " % settings.operation_profile)
    if not seed_profile:
        raise SeedProfileDoesNotExistError("%s" % settings.operation_profile)
    if settings.map_list:
        parse_map_file(settings.map_list)
    else:
        logger.info("map_file not specified. using existing salt_cloud_map file from resources.")
    logger.info("Profile being used: %s" % settings.operation_profile)
    # Explicit argument wins over the name configured in settings.
    seed_profile.name = instance_name or settings.name
    if settings.vpc_subnet:
        instance_id = seed_vpc(seed_profile, env=settings.vpc_subnet[0])
    else:
        instance_id = seed_machine(seed_profile)
    water_machines(seed_profile, [instance_id])
def test_get_profile(self):
    """Each configured profile name resolves to its SEED_PROFILES entry."""
    for name in self.profile_names:
        expected = SEED_PROFILES.get(name)
        assert expected == get_profile(name)
def deploy_msd_to_node(libcloud_node, msd, private_key_path=None):
    """Run the MultiStepDeployment *msd* on a freshly created node over SSH.

    When the active profile is "salt_master", first places salt-cloud config
    files, the git deploy key, and the route 53 DNS registration script and
    command file on the node, then (for every profile) executes the init
    scripts in *msd*.  Both phases retry in 5-second loops until they
    succeed or give up.

    :param libcloud_node: libcloud node object; its first private IP is used
        for the SSH connection.
    :param msd: MultiStepDeployment holding the init scripts to run.
    :param private_key_path: unused here -- the key path is taken from the
        profile's keypair instead.  TODO confirm this parameter is vestigial.
    """
    ##msd = MultiStepDeployment(Scripts from water_machines above)
    logger.warn("TODO: REFACTOR AND TAKE OUT ec2-user literal")
    # Resolve the active profile; it supplies the local SSH key path.
    seed_profile = settings.operation_profile
    seed_profile = get_profile(seed_profile)
    pkey = seed_profile.keypair['local_path']
    ssh_client = SSHClient(hostname=libcloud_node.private_ip[0],
                           port=settings.SSH_PORT,
                           username='******',
                           password=None,
                           key=pkey,
                           timeout=int(settings.NETWORK_TIMEOUT),)
    attempts = 0  # NOTE(review): never read below -- looks vestigial
    dns_attempts = 0
    ## This begins a series of file placements for the master's subsequent
    ## deployment tasks in the init script.
    while True:
        time.sleep(5)  # give the new node time to come up between attempts
        if seed_profile.profile_name == "salt_master":
            dns_attempts += 1
            logger.info("Number of attempts to connect: %s" % dns_attempts)
            try:
                logger.info("Attemping to connect to new node.")
                ssh_client.connect()
                logger.info("DNS SSH connection successful")
            except Exception as error:
                # Connection failed: retry (counter bumps twice per failed
                # pass -- once above, once here), giving up after the count
                # exceeds 10.
                logger.info("DNS register ssh connection failed, trying again")
                dns_attempts += 1
                if dns_attempts > 10:
                    logger.error("DNS process failed to make a connection. Exiting.")
                    break
                continue
            # salt-cloud files necessary for deployment
            for f in seed_profile.salt_cloud_vpc_files:
                try:
                    cloud_files = FileDeployment(
                        find_script(f),
                        target="/home/%s/%s" % (seed_profile.ami_user,
                                                os.path.basename(f)))
                    cloud_files.run(libcloud_node, ssh_client)
                    logger.info("salt-cloud file %s placed in home directory" % f)
                except Exception as e:
                    # Best-effort: a single failed file is logged, not fatal.
                    logger.error("could not place salt-cloud file: %s" % e)
            # places private key from path specified in keys.sh
            try:
                git_key = seed_profile.git_rsa_key
                git_key_file = FileDeployment(
                    git_key,
                    target="/home/%s/%s" % (seed_profile.ami_user,
                                            os.path.basename(git_key)))
                git_key_file.run(libcloud_node, ssh_client)
                logger.info("Placed %s." % git_key_file.target)
            except Exception as e:
                logger.error("Could not place file: %s" % e)
            # places DNS registration files for the master to add itself to route 53
            try:
                try_script = find_script(seed_profile.DNS_script)
                dns_file = FileDeployment(
                    try_script,
                    target="/home/%s/%s" % (seed_profile.ami_user,
                                            os.path.basename(try_script)))
                dns_file.run(libcloud_node, ssh_client)
                logger.info("Placed %s ." % dns_file.target)
            except Exception as e:
                logger.error("Could not place file: %s" % e)
            try:
                # Write the route 53 registration command (with credentials)
                # into the local command file, ship it to the node, then blank
                # the local copy so the keys do not linger on disk.
                dns_command = find_script(seed_profile.DNS_command)
                domain = seed_profile.r53_domain
                r53_key = seed_profile.r53_key
                r53_secret = seed_profile.r53_secret
                w_command = open(dns_command, 'w')
                w_command.write("sudo python register_master_DNS.py '%(domain)s' '%(r53_key)s' '%(r53_secret)s'" % {'domain': domain, 'r53_key': r53_key, 'r53_secret': r53_secret})
                w_command.close()
                c_deploy = FileDeployment(
                    dns_command,
                    target="/home/%s/%s" % (seed_profile.ami_user,
                                            os.path.basename(dns_command)))
                c_deploy.run(libcloud_node, ssh_client)
                r_command = open(dns_command, 'w')
                r_command.write(""" #This file get's blanked by the code to keep the keys out.\n echo 'The DNS register command did not make it to this file.'""")
                r_command.close()
                logger.info("The command file is in place")
                break
            except Exception as error:
                logger.error("Deployment of the DNS register file failed: %s", error)
                break
        else:
            # Non-master profiles skip the file-placement phase entirely.
            print "%s isn't a master." % seed_profile.profile_name
            logger.warn("%s isn't a master." % seed_profile.profile_name)
            break
    ## This begins the deployment of init_scripts from water_machines
    while True:
        time.sleep(5)
        try:
            if ssh_client.connect() is True:
                # Deploy files to libcloud_node
                msd.run(libcloud_node, ssh_client)
                pubkey_file = find_script("master_public_keys.sh")
                ssh_key = get_public_key_from_file(pubkey_file)
                ssh_key.run(libcloud_node, ssh_client)
                # Re-run each step individually so one slow/failed script
                # does not abort the rest.
                for failed_step in msd.steps:
                    try:
                        execute_files_on_minion([failed_step], libcloud_node,
                                                ssh_client)
                    except socket_timeout, timeout:
                        logger.debug(timeout)
                        # We'll have to have the minion ping the master
                        # when it's alive and kicking so that we can confirm it's alive.
                        # maybe via a webhook.
                        # This happens when scripts you've implemented take too
                        # long to complete.
                break
        except Exception, error:
            logger.error(error.message)