def deactivate_nodes(cf_conn, stackname):
    ec2_conn = AWS.get_ec2_connection()
    pm_instance = utils.get_puppetmaster_instance(ec2_conn)
    if pm_instance is None:
        return
    puppetmaster_ip = pm_instance.ip_address
    print "Deactivating nodes on puppetmaster (%s)" % puppetmaster_ip

    # collect the EC2 instance ids that belong to this stack
    instance_ids = set()
    resources = cf_conn.list_stack_resources(stack_name_or_id=stackname)
    for r in resources:
        if r.resource_type == "AWS::EC2::Instance":
            instance_ids.add(r.physical_resource_id)

    password = fetch_secrets("secrets/hcs-root")
    ssh_conn = SSHable(puppetmaster_ip)
    ssh_conn.connect()

    # clean and deactivate each stack node's puppet certificate on the puppetmaster
    for i in ec2_conn.get_only_instances():
        if i.id in instance_ids:
            hostname = i.tags["Name"]
            print "Deactivating node: " + hostname
            streams = ssh_conn.ssh(
                "echo {0} | sudo -S puppet node clean {1}; echo {0} | sudo -S puppet node deactivate {1}".format(
                    password, hostname
                )
            )
            print streams[1].read()
    ssh_conn.disconnect()
def validate_template(template_name):
    template_file = open("templates/%s.template" % template_name, "r")
    cf_conn = AWS.get_cf_connection()
    print "Attempting to validate %s template" % template_name
    # use a try catch because boto throws an exception.
    # it's pretty bad on their part but whatever.
    try:
        cf_conn.validate_template(template_body=template_file.read())
        print "AWS claims no problems!"
    except boto.exception.BotoServerError, e:
        # and the exception contains the error message!
        print e
def print_servers():
    Row = namedtuple("Row", ["Name", "PrivateIP", "PublicIP", "Subnet", "State"])
    rows = []
    for instance in AWS.get_ec2_connection().get_only_instances():
        data = Row(
            Name=instance.tags["Name"],
            PrivateIP=instance.private_ip_address,
            PublicIP=instance.ip_address,
            Subnet=instance.subnet_id,
            State=instance.state,
        )
        rows.append(data)
    pprinttable(rows)
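# pprinttable() is used by print_servers and print_dns but is not defined in this
# section. A minimal sketch of such a helper (hypothetical; the real one may format
# output differently) that prints a list of namedtuple rows as a fixed-width table:
def pprinttable(rows):
    if not rows:
        return
    headers = rows[0]._fields
    # column width = widest of the header and every value in that column
    widths = [max(len(str(v)) for v in (h,) + tuple(getattr(r, h) for r in rows)) for h in headers]
    fmt = " | ".join("%%-%ds" % w for w in widths)
    print fmt % headers
    print "-+-".join("-" * w for w in widths)
    for row in rows:
        print fmt % tuple(str(v) for v in row)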
def print_dns():
    route53 = AWS.get_route53_connection()
    results = route53.get_all_hosted_zones()
    Row = namedtuple("Row", ["Name", "Type", "TTL", "Record"])
    for zone in results["ListHostedZonesResponse"]["HostedZones"]:
        print "Domain: " + zone["Name"] + " (" + ("Private" if zone["Config"]["PrivateZone"] else "Public") + ")"
        # we need only the Id to get the DNS records
        zone_id = zone["Id"].replace("/hostedzone/", "")
        print "Zone Id: %s" % zone_id
        sets = route53.get_all_rrsets(zone_id)
        rows = []
        for rset in sets:
            for i, rr in enumerate(rset.resource_records):
                r = rr.encode("ascii", "ignore")
                if i == 0:
                    rows.append(Row(Name=rset.name, Type=rset.type, TTL=rset.ttl, Record=r))
                else:
                    rows.append(Row(Name="", Type="", TTL="", Record=r))
        pprinttable(rows)
        print "\n"
import AWS

# image_id = 'ami-e5083683'   # Amazon Linux
image_id = 'ami-405f7226'      # Ubuntu Server 16.04
# image_id = 'ami-ee8dbb88'   # Microsoft Windows Server 2016 Base
# volume_id = 'YOUR VOLUME ID'
instance_type = 't2.micro'

running_dns = AWS.running_instance(instance_type)
if running_dns is not None:
    print(running_dns)
else:
    # print(AWS.create_instance('t2.micro', image_id, volume_id))
    print(AWS.create_instance(instance_type, image_id))
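# running_instance() and create_instance() live in the local AWS module, which is not
# shown here. As a rough sketch only (the region, filter names, and return value are
# assumptions), running_instance might look up the first running instance of the
# requested type with boto and return its public DNS name:
import boto.ec2

def running_instance(instance_type, region='us-east-1'):
    # Hypothetical helper; the real AWS module may differ.
    conn = boto.ec2.connect_to_region(region)
    instances = conn.get_only_instances(
        filters={'instance-state-name': 'running', 'instance-type': instance_type}
    )
    if not instances:
        return None
    return instances[0].public_dns_name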
def main():
    # arg parse
    parser = argparse.ArgumentParser(description="Command Line Tools for HCS Cloud Machines.")
    group = parser.add_mutually_exclusive_group()
    parser.add_argument(
        "--list_resources", "-l", help="list AWS resources. supports 'stacks','servers','dns','rds'", default=None
    )
    # use a group because these are exclusive flags
    group.add_argument("--create", "-c", help="create a new HCS server from template", default=None)
    group.add_argument(
        "--purge", "-p", help="purges all cloudformation stacks that were rolled back", action="store_true"
    )
    group.add_argument("--destroy", "-d", help="destroys specified stack", default=None)
    # this will cause boto to create a log file in your working directory
    parser.add_argument("--verbose", help="turn on boto logger for debugging", action="store_true")
    parser.add_argument("--level", help="verbosity level if verbose mode enabled", type=int, default=10)
    # option to validate templates
    parser.add_argument(
        "--validate", "-v", help="validate an AWS template. must be located in the templates folder", default=None
    )
    # option to reboot things.
    parser.add_argument("--reboot", "-r", help="reboot a service instance. supports 'rds'", default=None)
    args = parser.parse_args()

    if args.verbose:
        boto.set_file_logger("hcs-cloudrunner", filepath="./boto.log", level=args.level)

    if args.create is not None:
        print "Creating a %s server" % args.create
        create_stack(args.create)
    elif args.list_resources is not None:
        if args.list_resources.lower() == "stacks":
            get_active_stacks(AWS.get_cf_connection())
        elif args.list_resources.lower() == "servers":
            print_servers()
        elif args.list_resources.lower() == "dns":
            print_dns()
        elif args.list_resources.lower() == "rds":
            RDS.view_databases()
        else:
            print "Resource type not supported!"
    elif args.purge:
        cf_conn = AWS.get_cf_connection()
        purge_stacks(cf_conn)
    elif args.destroy is not None:
        cf_conn = AWS.get_cf_connection()
        print "WARNING!"
        print "THIS WILL PERMANENTLY DESTROY RESOURCES!"
        print "I'm going to confirm your action. Use ctrl+c to quit!"
        # generate a random 5 char sequence
        pw = "".join(random.choice(string.ascii_uppercase + string.digits) for a in range(5))
        print "Are you sure you want to destroy the stack: [ " + args.destroy + " ] ?"
        match = raw_input("Type in the passkey to confirm: " + pw + " ")
        if match != pw:
            print "Stopped deletion!"
            return
        finalcountdown = raw_input("Are you REALLY sure? There's no going back! [ Type YES ] ")
        if finalcountdown.upper() == "YES":
            destroy_stack(cf_conn, args.destroy)
        else:
            print "Stopped deletion!"
    elif args.validate is not None:
        validate_template(args.validate)
    elif args.reboot is not None:
        if args.reboot.lower() == "rds":
            RDS.restart_database()
    else:
        parser.print_help()
def main():
    global VERBOSE
    global collection

    # default access info
    awsAccessKey = 'AKIAJICPBE3SSHW5SR7A'
    awsSecretKey = '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'
    awsBucket = 'aqueti.data'

    # parse command line arguments
    parser = argparse.ArgumentParser(description='AWARE Database Script')
    parser.add_argument('-v', action='store_const', dest='VERBOSE', const='True', help='VERBOSE output')
    parser.add_argument('-vv', action='store_const', dest='VERBOSE2', const='True', help='VERBOSE output')
    parser.add_argument('-p', action='store_const', dest='printout', const='True', help='print contents of JSON file')
    parser.add_argument('-d', action='store', dest='path', help='path to data')
    parser.add_argument('-b', action='store', dest='bucket', help='S3 Bucket with data')
    parser.add_argument('-a', action='store', dest='aws_access', help='AWS access code')
    parser.add_argument('-s', action='store', dest='aws_secret', help='AWS secret key')
    parser.add_argument('-f', action='store', dest='fname', help='filename to insert')
    parser.add_argument('-i', action='store_const', dest='insert', const='True',
                        help='Add records to the given dictionary.')
    parser.add_argument('-r', action='store_const', dest='recurse', const='True',
                        help='recursively add JSON files to the dictionary')
    parser.add_argument('-u', action='store_const', dest='update', const='True', help='update records')
    # '-l' is an assumed flag name: args.list_objects is referenced below but no argument defined it
    parser.add_argument('-l', action='store_const', dest='list_objects', const='True',
                        help='list objects in the given bucket (use -b ALL to list buckets)')
    parser.add_argument('-c', action='store', dest='collection', help='collection (table) to use')
    parser.add_argument('dbase', help='database name')
    args = parser.parse_args()

    # set VERBOSE flag as requested
    if args.VERBOSE:
        VERBOSE = 1
    if args.VERBOSE2:
        VERBOSE = 2
        print "VERBOSE=2"

    # extract relevant parameters
    if VERBOSE > 1:
        print "Using database " + args.dbase

    ##################################################
    # connect to database and AWS server (if needed)
    ##################################################
    # connect to database
    mdb = MDB.MDB()
    if VERBOSE > 1:
        print "Connecting to mongodb: " + args.dbase
    try:
        rc = mdb.connect(args.dbase)
    except:
        print "MDB: Unable to connect to database: " + args.dbase
        return -1

    if args.aws_access:
        awsAccessKey = args.aws_access
    if args.aws_secret:
        awsSecretKey = args.aws_secret
    if args.bucket:
        awsBucket = args.bucket

    # Connect to AWS class
    # sdf - need to make this optional
    aws = AWS.AWS()
    aws.setVerbose(VERBOSE)
    if VERBOSE > 1:
        print "Connecting to AWS: " + awsAccessKey + "/" + awsSecretKey
    try:
        aws.connect(awsAccessKey, awsSecretKey)
    except:
        print "Unable to connect to AWS. Please check inputs"
        return -1

    # Update specified database with the appropriate bucket
    if args.update:
        # ensure bucket and dbase are defined
        if awsBucket:
            if VERBOSE > 1:
                print "Updating database with bucket " + awsBucket
            rc = updateAWS(mdb, aws, awsBucket)
            print str(rc) + " records added to the database"
            if VERBOSE > 0:
                if rc > 0:
                    print "ADB::main: Database updated successfully"
                    return 1
                else:
                    print "ADB: Unable to update database. Return code: " + str(rc)
                    return -1
        else:
            print "Unable to update. The database bucket name is not defined"
            return -2
        return 1

    # sdf - needs to be checked
    # if args.host != "" and args.port != -1:
    #     mdb = MDB(args.dbase, args.host, args.port)
    # else:
    #     mdb = MDB(args.dbase)

    if args.list_objects:
        if args.bucket == "ALL":
            aws.listBuckets()
        else:
            aws.listObjects(args.bucket)
        # bucket = conn.get_bucket('aqueti.data')
        # for key in bucket.list():
        #     print key.name.encode('utf-8')

    # We are inserting records. Check if recursing directories or not
    if args.insert:
        if args.fname:
            node = AJSON.readJson(args.fname)
            if isinstance(node, int):
                if VERBOSE > 0:
                    print "Unable to read record"
                return -1
            rc = insert(str(args.dbase), str(args.fname))
        elif args.path:
            if args.recurse:
                recurse(args.dbase, str(args.path), "insert")
            else:
                print "Currently only insert capability is supported"
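# recurse(), insert(), and updateAWS() are helpers defined elsewhere in this module.
# As a rough sketch only (hypothetical, inferred from how it is called above),
# recurse(dbase, path, "insert") might walk the tree and insert every JSON file found:
import os

def recurse(dbase, path, mode):
    count = 0
    for root, dirs, files in os.walk(path):
        for fname in files:
            if not fname.endswith(".json"):
                continue
            if mode == "insert":
                # insert() is the module's own helper used in main() above
                insert(dbase, os.path.join(root, fname))
                count += 1
    return count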
def clusterRemote(opt, arg):
    """Start a remote cluster over SSH"""

    # Load the remote cluster configuration
    clConfig = {}
    execfile(opt.clusterfile, clConfig)
    contConfig = clConfig['controller']
    engConfig = clConfig['engines']
    # Determine where to find sshx:
    sshx = clConfig.get('sshx', os.environ.get('IPYTHON_SSHX', 'sshx'))

    # ADDED CONFIG ITEMS
    sshOpts = clConfig['ssh_options_string']
    pushConfig = clConfig['push_kwargs']

    # Store all logs inside the ipython directory
    ipdir = cutils.get_ipython_dir()
    pjoin = os.path.join

    logfile = opt.logfile
    if logfile is None:
        logdir_base = pjoin(ipdir, 'log')
        ensureDir(logdir_base)
        logfile = pjoin(logdir_base, 'ipcluster')
    # Append this script's PID to the logfile name always
    logfile = '%s-%s' % (logfile, os.getpid())

    print 'Starting controller:'
    # Controller data:
    xsys = os.system

    contHost = contConfig['host']
    contLog = '%s-con-%s-' % (logfile, contHost)
    cmd = "ssh %s %s '%s' 'rm ~/.ipython/*.furl ~/.ipython/*.pem'" % \
          (sshOpts, contHost, sshx)
    print 'cmd:<%s>' % cmd  # dbg
    xsys(cmd)
    time.sleep(1)

    cmd = "ssh %s %s '%s' 'ipcontroller --logfile %s' &" % \
          (sshOpts, contHost, sshx, contLog)
    print 'cmd:<%s>' % cmd  # dbg
    xsys(cmd)
    time.sleep(2)

    import AWS

    # check for mpi - copied from above
    mpi = opt.mpi
    mpistr = ''
    if mpi:
        # start with mpi - killing the engines with sigterm will not work if you do this
        mpistr = '--mpi=' + mpi

    print 'Starting engines: '
    for engineHost, engineData in engConfig.iteritems():
        if isinstance(engineData, int):
            numEngines = engineData
        else:
            raise NotImplementedError('port configuration not finished for engines')

        print 'Pushing furl to %s' % engineHost
        AWS.push_engine_furl(engineHost, **pushConfig)

        print 'Starting %d engines on %s' % (numEngines, engineHost)
        engLog = '%s-eng-%s-' % (logfile, engineHost)
        for i in range(numEngines):
            # cmd = "ssh %s '%s' 'ipengine --controller-ip %s --logfile %s' &" % (engineHost, sshx, contHost, engLog)
            cmd = "ssh %s %s '%s' 'ipengine %s --logfile %s' &" % (
                sshOpts, engineHost, sshx, mpistr, engLog)
            print 'cmd:<%s>' % cmd  # dbg
            xsys(cmd)
        # Wait after each host a little bit
        time.sleep(1)

    startMsg(contConfig['host'])
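# The clusterfile loaded via execfile() in clusterRemote is plain Python defining the
# keys the function reads. The host names, key path, and push_kwargs contents below are
# placeholders; push_kwargs is forwarded verbatim to AWS.push_engine_furl, whose
# signature is not shown here.
controller = {'host': 'controller.example.com'}

# engines maps hostname -> number of engines to start on that host
engines = {
    'node01.example.com': 4,
    'node02.example.com': 4,
}

# optional: path to the sshx wrapper on the remote hosts
sshx = 'sshx'

# ADDED CONFIG ITEMS consumed by clusterRemote
ssh_options_string = '-i ~/.ssh/cluster-key.pem -o StrictHostKeyChecking=no'
push_kwargs = {}  # contents depend on AWS.push_engine_furl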