def get_cluster_ssh(self):
    """Create an SSH client connected to the cluster's master instance.

    If the cluster DNS is not known yet, polls once per second (printing a
    single "waiting" notice) until the DNS resolves and a connection succeeds.

    Refer to
    http://boto.readthedocs.org/en/latest/ref/manage.html?highlight=ssh#boto.manage.cmdshell.SSHClient
    for all possible methods on the ssh client object.

    :return: a boto SSH client object connected to the master instance
    """
    printout = 0  # guard so the waiting message is only printed once
    if self.cluster_dns == "":
        # DNS not fetched yet: retry until it resolves and SSH succeeds.
        while self.cluster_dns == "":
            try:
                # if the dns doesn't exist, go grab it
                self.get_cluster_dns()
                instance = self.aws_connection.list_instances(self.cluster_id).instances
                # find the master instance out of all cluster instances
                for inst in instance:
                    if inst.publicdnsname == self.cluster_dns:
                        master_instance = inst
                        break
                    else:
                        continue
                # this is a hack, for some reason boto cmdshell looks for a
                # nonexistant dns_name attribute
                master_instance.dns_name = master_instance.publicdnsname
                # establish the ssh connection
                ssh_client = sshclient_from_instance(master_instance, ssh_key_file = self.pem_key, user_name = "hadoop", ssh_pwd = None)
                return ssh_client
            except:
                # NOTE(review): the bare except also hides real failures
                # (e.g. a missing key file) behind the waiting message.
                if printout == 0:
                    print "Waiting for DNS to become available..."
                    printout = 1
                time.sleep(1)
    else:
        # DNS already known: locate the master instance and connect directly.
        instance = self.aws_connection.list_instances(self.cluster_id).instances
        for inst in instance:
            if inst.publicdnsname == self.cluster_dns:
                master_instance = inst
                break
            else:
                continue
        # this is a hack, for some reason boto cmdshell looks for this attribute
        master_instance.dns_name = master_instance.publicdnsname
        ssh_client = sshclient_from_instance(master_instance, ssh_key_file = self.pem_key, user_name = "hadoop", ssh_pwd = None)
        return ssh_client
def _wait(self, key_path, login_user='******', sleep_time=2):
    """Block until the launched instance is running and reachable over SSH.

    :param key_path: path to the SSH private key for the instance
    :param login_user: remote user to authenticate as
    :param sleep_time: seconds between instance-state polls
    :raises PynecroudError: if no instance has been launched yet
    """
    if not self.instance:
        raise PynecroudError('Must launch instance first')
    log.info('Waiting for instance availability...')
    while self.instance.update() != 'running':
        time.sleep(sleep_time)
    log.info('Waiting for ssh access...')
    # Building the client is itself the SSH-reachability check; the
    # resulting object is intentionally discarded.
    sshclient_from_instance(self.instance, key_path, user_name=login_user)
def run_shell_command_on_instance(region=None, instance_name=None, cmd_line=None, user_name=None):
    '''Run shell command on EC2 instance.

    :param region: EC2 region; falls back to EC2CFG['DEFAULT_REGION']
    :param instance_name: value of the instance's "Name" tag
    :param cmd_line: shell command to execute remotely
    :param user_name: SSH login user; falls back to EC2CFG['DEFAULT_USER_NAME']
    :return: (status, stdout, stderr) tuple from the remote command
    '''
    if region is None or region == '':
        region = EC2CFG['DEFAULT_REGION']
    # BUG FIX: this branch previously re-tested ``region`` instead of
    # ``user_name``, so an explicitly empty user_name was never defaulted.
    if user_name is None or user_name == '':
        user_name = EC2CFG['DEFAULT_USER_NAME']
    # connect to region
    conn = get_connection(region)
    # get the instance object related to instance name
    reservation = conn.get_all_instances(
        filters={"tag:Name": "%s" % (instance_name)})[0]
    instance = reservation.instances[0]
    # create an SSH client for this instance
    ssh_client = sshclient_from_instance(instance,
                                         EC2CFG['PEM'][region],
                                         user_name=user_name)
    # run the command and get the results
    status, stdout, stderr = ssh_client.run(cmd_line)
    return (status, stdout, stderr)
def get_file_from_instance(region=None, instance_name=None, src=None, dst=True, user_name=None):
    '''Download file from EC2 instance.

    :param region: EC2 region; falls back to EC2CFG['DEFAULT_REGION']
    :param instance_name: value of the instance's "Name" tag
    :param src: remote path to fetch
    :param dst: local destination path
        (NOTE(review): default ``True`` looks wrong for a path — confirm callers)
    :param user_name: SSH login user; falls back to EC2CFG['DEFAULT_USER_NAME']
    :return: None
    '''
    if region is None or region == '':
        region = EC2CFG['DEFAULT_REGION']
    # BUG FIX: this branch previously re-tested ``region`` instead of
    # ``user_name``, so an explicitly empty user_name was never defaulted.
    if user_name is None or user_name == '':
        user_name = EC2CFG['DEFAULT_USER_NAME']
    # connect to region
    conn = get_connection(region)
    # get the instance object related to instance name
    reservation = conn.get_all_instances(
        filters={"tag:Name": "%s" % (instance_name)})[0]
    instance = reservation.instances[0]
    # create an SSH client for this instance
    ssh_client = sshclient_from_instance(instance,
                                         EC2CFG['PEM'][region],
                                         user_name=user_name)
    # get file from instance
    ssh_client.get_file(src, dst)
    return None
def update_gcc_explorers():
    """Roll the latest gcc-explorer docker image onto every running
    GccExplorer-tagged EC2 instance, one at a time, keeping at least two
    instances healthy throughout.

    NOTE(review): indentation reconstructed from a collapsed source line; the
    ``if prev != 1`` guard is assumed to wrap the whole rolling update, with
    the final health check and scale-back at function level — confirm against
    the original file.
    """
    # Scale up so capacity remains while instances restart.
    prev = ensure_at_least_two()
    await_at_least_two_healthy()
    if prev != 1:
        conn = boto.ec2.connect_to_region('us-east-1')
        reservations = conn.get_all_instances()
        for reservation in reservations:
            for instance in reservation.instances:
                if instance.state != 'running':
                    print "Skipping {} instance {}".format(instance.state, instance.id)
                    continue
                if "App" not in instance.tags or instance.tags["App"] != "GccExplorer":
                    print "Skipping non-gcc explorer instance {}".format(instance.id)
                    continue
                print "Connecting to", instance
                ssh_client = sshclient_from_instance(instance, "ec2-mattgodbolt.pem", user_name='ubuntu')
                print "Connected. Running command"
                status, stdout, stderr = ssh_client.run('sudo -i docker pull -a mattgodbolt/gcc-explorer && sudo service gcc-explorer restart')
                print "Status", status
                print "Stdout", stdout
                print "Stderr", stderr
                # Give the restarted service time to come back up before
                # touching the next instance.
                print "Done, waiting a minute"
                time.sleep(60)
    await_at_least_two_healthy()
    # Restore the fleet to its previous size.
    set_back_to(prev)
def wait_for_ping(self, instance, user=None):
    """
    To obtain the SSH client, we must wait for the instance to boot,
    even though the EC2 instance status is available.

    :param instance: created ec2 instance to wait for
    :param user: SSH user to use with the created key
    :return: SSHClient or None on error
    :raises RuntimeError: if the instance has no public DNS name
    """
    host_key_file = os.path.join(self.localpath, 'known_hosts')
    # Windows ping uses -n for the packet count; POSIX ping uses -c.
    ping_arg = '-n'
    if os.name == 'posix':
        ping_arg = '-c'
    if not instance.public_dns_name:
        log.error("Spawned instance was not allocated a public IP. "
                  "Please try again.")
        # BUG FIX: a bare ``raise`` here has no active exception to re-raise
        # and itself fails with an unrelated error; raise explicitly instead.
        raise RuntimeError("Spawned instance was not allocated a public IP.")
    ping_cmd = 'ping {} 1 {}'.format(ping_arg, instance.public_dns_name)
    try:
        timeout = 0
        # BUG FIX: the original condition ``!= 0 or timeout >= 60`` loops
        # forever once 60 s have elapsed (the second operand stays true).
        # Retry only while the ping fails AND the budget is not exhausted.
        while os.system(ping_cmd) != 0 and timeout < 60:
            time.sleep(5)
            timeout += 5
        # artificial wait for ssh service up status
        time.sleep(30)
        # Truncate known_hosts so stale host keys never block the connection.
        open(host_key_file, 'w').close()
        client = sshclient_from_instance(
            instance,
            os.path.join(self.localpath, self.key_name + '.pem'),
            host_key_file=host_key_file,
            user_name=user or self.user)
    except Exception as e:
        log.error(e)
        client = None
    return client
def setup_filesystem_on_host(instance_details, ssh_user, ssh_priv_key_path):
    """Create the LVM partitions a pulp host needs, driven by its Role tag."""
    dns_name = instance_details["public_dns_name"]
    role = instance_details["tags"]["Role"]
    logging.info("Setting up LVM filesystems on '%s' which is a '%s'" % (dns_name, role))
    ssh_client = sshclient_from_instance(instance_details["instance"],
                                         ssh_key_file=ssh_priv_key_path,
                                         user_name=ssh_user)
    # CDS hosts keep pulp content under a dedicated mountpoint.
    if role.upper().startswith("CDS"):
        pulp_mountpoint = "/var/lib/pulp-cds"
    else:
        pulp_mountpoint = "/var/lib/pulp"
    _install_lvm(ssh_client)
    # Expected partitions:
    #   /dev/xvdm -> /var/log
    #   /dev/xvdn -> /var/lib/mongodb (RHUA only)
    #   /dev/xvdp -> /var/lib/pulp or /var/lib/pulp-cds
    logging.info("Setting up /var/log on '%s'" % (dns_name))
    _create_log_part(ssh_client, blockdevice="/dev/xvdm", vgname="vg0",
                     lvname="var_log", mountpoint="/var/log")
    if instance_details["tags"]["Role"] == "RHUA":
        # only RHUA have this partition
        logging.info("Setting up /var/lib/mongodb on '%s'" % (dns_name))
        _create_part(ssh_client, blockdevice="/dev/xvdn", vgname="vg1",
                     lvname="var_mongodb", mountpoint="/var/lib/mongodb")
    if "HAPROXY" not in instance_details["tags"]["Role"]:
        # don't need to do this on haproxy
        logging.info("Setting up %s on '%s'" % (pulp_mountpoint, dns_name))
        _create_pulp_part(ssh_client, blockdevice="/dev/xvdp", vgname="vg2",
                          lvname="var_pulp", mountpoint=pulp_mountpoint)
def wait_for_ping(self, instance, user=None):
    """
    To obtain the SSH client, we must wait for the instance to boot,
    even though the EC2 instance status is available.

    :param instance: created ec2 instance to wait for
    :param user: SSH user to use with the created key
    :return: SSHClient or None on error
    :raises RuntimeError: if the instance has no public DNS name
    """
    host_key_file = os.path.join(self.localpath, 'known_hosts')
    # Windows ping uses -n for the packet count; POSIX ping uses -c.
    ping_arg = '-n'
    if os.name == 'posix':
        ping_arg = '-c'
    if not instance.public_dns_name:
        log.error("Spawned instance was not allocated a public IP. "
                  "Please try again.")
        # BUG FIX: a bare ``raise`` here has no active exception to re-raise
        # and itself fails with an unrelated error; raise explicitly instead.
        raise RuntimeError("Spawned instance was not allocated a public IP.")
    # NOTE(review): the guard checks public_dns_name but the ping targets
    # ip_address — preserved as-is, confirm this asymmetry is intended.
    ping_cmd = 'ping {} 1 {}'.format(ping_arg, instance.ip_address)
    try:
        timeout = 0
        # BUG FIX: the original condition ``!= 0 or timeout >= 60`` loops
        # forever once 60 s have elapsed (the second operand stays true).
        # Retry only while the ping fails AND the budget is not exhausted.
        while os.system(ping_cmd) != 0 and timeout < 60:
            time.sleep(5)
            timeout += 5
        # artificial wait for ssh service up status
        time.sleep(30)
        # Truncate known_hosts so stale host keys never block the connection.
        open(host_key_file, 'w').close()
        client = sshclient_from_instance(instance, os.path.join(
            self.localpath, self.key_name + '.pem'),
            host_key_file=host_key_file,
            user_name=user or self.user)
    except Exception as e:
        log.error(e)
        client = None
    return client
def update(repo):
    """Run ``git pull && make dist`` in *repo* on a running GccExplorer node.

    Returns False as soon as a remote command fails, True after the first
    success.

    NOTE(review): indentation reconstructed from a collapsed source line; as
    read here the function returns after the FIRST matching instance. If all
    running GccExplorer instances should be updated, the success return
    belongs after the loops — confirm against the original file.
    """
    conn = boto.ec2.connect_to_region('us-east-1')
    reservations = conn.get_all_instances()
    for reservation in reservations:
        for instance in reservation.instances:
            # Only running instances tagged App=GccExplorer are eligible.
            if instance.state != 'running':
                print "Skipping {} instance {}".format(instance.state, instance.id)
                continue
            if "App" not in instance.tags or instance.tags[
                    "App"] != "GccExplorer":
                print "Skipping non-gcc explorer instance {}".format(
                    instance.id)
                continue
            print "Connecting to", instance
            ssh_client = sshclient_from_instance(instance,
                                                 "ec2-mattgodbolt.pem",
                                                 user_name='ubuntu')
            status, stdout, stderr = ssh_client.run(
                'cd {} && git pull && make dist'.format(repo))
            if status:
                print "Error"
                print stdout
                print stderr
                return False
            else:
                print "OK: " + stdout
                return True
def execute_script(instance, key_pair, script): ssh_client = sshclient_from_instance(instance, key_pair, user_name='ec2-user') status, stdin, stderr = ssh_client.run(script) print status print stdin print stderr
def _get_ssh_clients(self, iids): ssh_clients = [] for i in iids: instance = self.conn.get_all_instances([i])[0].instances[0] try: client = sshclient_from_instance(instance, self.pem_file, user_name='ec2-user') ssh_clients.append(client) except Exception as e: print "Caught error creating ssh connection" return ssh_clients
def wait_ssh(conn): print "Waiting for all the instances to accept SSH connection" ssh_client = paramiko.SSHClient() instances = conn.get_only_instances() for inst in instances: #print inst.tags if inst.state != 'running': continue if inst.tags['Name'][:7] != "worker_": continue sys.stdout.write(" - %s " % inst.tags['Name']) sys.stdout.flush() while True: try: sshclient_from_instance(inst, "./%s.pem" % sshKey, user_name=username) break except paramiko.SSHException: time.sleep(10) inst.update() print '.' sys.stdout.write(' ok!\n')
def get_ssh_client(self, instance):
    """Build an SSH client for the given EC2 instance.

    Args:
        instance: an EC2 instance object
    Returns:
        ssh client object authenticated as 'ubuntu'
    """
    key_file = '/Users/fiannacci/.ssh/ec2_key.pem'
    return sshclient_from_instance(instance, ssh_key_file=key_file, user_name='ubuntu')
def restart_service(instance): ssh_client = sshclient_from_instance(instance, '/home/robert/relwellnlp.pem', user_name='ubuntu') ssh_client.known_hosts = None commands = [ 'sudo sv stop parser_daemon', 'sudo killall java', 'sudo sv start parser_daemon', 'sudo tail /var/log/runit/parser_poller/current' ] results = map(lambda x: ssh_client.run(x), commands) print results
def run(self, params={}):
    """Run a ClamAV scan in *directory* on an EC2 instance and return the
    parsed JSON results (empty dict on failure).

    NOTE(review): the mutable default ``params={}`` and the private key
    written to ./pk.pem (never deleted afterwards) are preserved as-is.
    """
    directory = params.get("directory")
    instance_id = params.get("instance_id")
    private_key = params.get("private_key")
    user = params.get("user")
    region = params.get("region")  # NOTE(review): read but never used here
    empty_json_output = {}
    # Create private key file
    f = open('./pk.pem', 'w')
    f.write(private_key)
    f.close()
    # Create command from user input
    command = 'python clam_av_run.py ' + directory
    try:
        # Connect to AWS instance
        reservations = self.connection.aws.get_all_instances(
            filters={'instance_id': instance_id})
        instance = reservations[0].instances[0]
        ssh_client = sshclient_from_instance(instance, './pk.pem', user_name=user)
        # Copy the scan script to the instance
        ssh_client.put_file(
            "./komand_ec2_investigations/actions/clam_av_run.py",
            "./clam_av_run.py")
        # Execute the command and return the standard output
        status, stdout, stderr = ssh_client.run(command)
        # Remove script after running
        ssh_client.run('rm ./clam_av_run.py')
        # A bare "0" on stdout is the script's sentinel for clamscan missing.
        if stdout.decode("utf-8").rstrip() == "0":
            results = empty_json_output
            self.logger.error(
                "Clam scan is not installed on host and is required to run"
            )
        elif stderr.decode("utf-8") != "":
            results = empty_json_output
            self.logger.error(stderr.decode("utf-8").rstrip())
        else:
            results = json.loads(stdout.decode("utf-8").rstrip())
    except Exception:
        self.logger.error(
            "Something went wrong, command probably failed to run")
        raise
    self.logger.info(results)
    return results
def run(inst, commands, username='******'):
    """
    Uses boto to figure out how to SSH in and run commands
    :return a tuple of tuples consisting of:
        # The integer status of the command
        # A string containing the output of the command
        # A string containing the stderr output of the command
    """
    key_file = path_join(expanduser('~'), '.ssh', 'aws', 'private', 'cscie90.pem')
    ssh_client = sshclient_from_instance(inst, user_name=username, ssh_key_file=key_file)
    results = []
    for command in commands:
        results.append(ssh_client.run(command))
    return tuple(results)
def get_ssh_client(self, instance):
    """Return a boto SSH client for *instance*, logging in as 'ubuntu' with
    the locally stored EC2 private key.

    Args:
        instance: an EC2 instance object
    Returns:
        ssh client object
    """
    private_key = '/Users/fiannacci/.ssh/ec2_key.pem'
    client = sshclient_from_instance(instance,
                                     ssh_key_file=private_key,
                                     user_name='ubuntu')
    return client
def login_to_aws(): logger.info(u"logging in to aws") conn = boto.ec2.connect_to_region('us-west-2') # instance = conn.get_all_instances()[0].instances[0] ssh_client = None for reservation in conn.get_all_instances(): instance = reservation.instances[0] try: if not ssh_client: ssh_client = sshclient_from_instance(instance, "data/key.pem", user_name="ec2-user") print u"this instance worked: {}".format(instance) except Exception: pass return (conn, ssh_client)
def upload_image(self, file_path, filename):
    """Unzip an uploaded image bundle locally, scp it to the prod server, and
    run the remote bulk-upload script there.

    NOTE(review): the bare ``except`` swallows all remote-upload errors and
    the final success message prints regardless of outcome.
    """
    print "Unzipping %s on Test Server on path %s"%(filename, file_path)
    subprocess.call('python /www/supplified-cms/bulk_upload_images.py -f %s -p %s'%(filename, file_path), shell=True)
    print "Sending File to Prod Server ...... "
    try:
        today_date = datetime.date.strftime(datetime.date.today(), '%d-%m-%y')
        # Re-point file_path at today's upload directory for the scp below.
        file_path = '/www/supplified-cms/static/upload_product_image/u/%s/%s'%(today_date, filename)
        subprocess.call(["scp","-i",ENVIRONMENT_VAR[CUR_ENV]['pem'], file_path, "ubuntu@%s:/www/public_html/upload_zip/"%(ENVIRONMENT_VAR[CUR_ENV]['dns'])])
        conn = boto.ec2.connect_to_region('us-west-2')
        instance = conn.get_all_instances(ENVIRONMENT_VAR[CUR_ENV]['instance_id'])[0].instances[0]
        ssh_client = sshclient_from_instance(instance, ENVIRONMENT_VAR[CUR_ENV]['pem'], user_name='ubuntu')
        status, stdout, stderr = ssh_client.run('python /www/public_html/bulk_upload_images.py -f %s -p %s'%(filename, "/www/public_html/upload_zip/"))
    except:
        print "Error : ",sys.exc_info()
    print "Successfully Uploaded and Compressed"
def connect_ec2instance(instance_id, command):
    """Run *command* on the given EC2 instance over SSH and return its stdout.

    Authenticates as ec2-user with ./key1.pem; stderr is printed.
    """
    print("Trying to connect to " + instance_id)
    conn = boto.ec2.connect_to_region('ap-northeast-1')
    # Resolve the instance object for the given instance id.
    reservation = conn.get_all_instances([instance_id])[0]
    target = reservation.instances[0]
    ssh_client = sshclient_from_instance(target, './key1.pem', user_name='ec2-user')
    # run() returns (exit status, stdout, stderr).
    status, stdout, stderr = ssh_client.run(command)
    print(stderr)
    return stdout
def update(repo):
    """Run ``git pull && make dist`` in *repo* on a running GccExplorer node.

    Returns False as soon as a remote command fails, True after the first
    success.

    NOTE(review): indentation reconstructed from a collapsed source line; as
    read here the function returns after the FIRST matching instance. If all
    running GccExplorer instances should be updated, the success return
    belongs after the loops — confirm against the original file.
    """
    conn = boto.ec2.connect_to_region('us-east-1')
    reservations = conn.get_all_instances()
    for reservation in reservations:
        for instance in reservation.instances:
            # Only running instances tagged App=GccExplorer are eligible.
            if instance.state != 'running':
                print "Skipping {} instance {}".format(instance.state, instance.id)
                continue
            if "App" not in instance.tags or instance.tags["App"] != "GccExplorer":
                print "Skipping non-gcc explorer instance {}".format(instance.id)
                continue
            print "Connecting to", instance
            ssh_client = sshclient_from_instance(instance, "ec2-mattgodbolt.pem", user_name='ubuntu')
            status, stdout, stderr = ssh_client.run('cd {} && git pull && make dist'.format(repo))
            if status:
                print "Error"
                print stdout
                print stderr
                return False
            else:
                print "OK: " + stdout
                return True
def setup_filesystem_on_host(instance_details, ssh_user, ssh_priv_key_path):
    """Create the LVM partitions this host needs, based on its Role tag."""
    dns_name = instance_details["public_dns_name"]
    role = instance_details["tags"]["Role"]
    logging.info("Setting up LVM filesystems on '%s' which is a '%s'" % (dns_name, role))
    ssh_client = sshclient_from_instance(instance_details["instance"],
                                         ssh_key_file=ssh_priv_key_path,
                                         user_name=ssh_user)
    # CDS hosts keep pulp content under a dedicated mountpoint.
    pulp_mountpoint = "/var/lib/pulp-cds" if role.upper().startswith("CDS") else "/var/lib/pulp"
    # Expected partitions:
    #   /dev/xvdq -> /var/log
    #   /dev/xvdr -> /var/lib/mongodb
    #   /dev/xvdt -> /var/lib/pulp or /var/lib/pulp-cds
    logging.info("Setting up /var/log on '%s'" % (dns_name))
    _create_log_part(ssh_client, blockdevice="/dev/xvdq", vgname="vg0",
                     lvname="var_log", mountpoint="/var/log")
    logging.info("Setting up /var/lib/mongodb on '%s'" % (dns_name))
    _create_part(ssh_client, blockdevice="/dev/xvdr", vgname="vg1",
                 lvname="var_mongodb", mountpoint="/var/lib/mongodb")
    logging.info("Setting up %s on '%s'" % (pulp_mountpoint, dns_name))
    _create_part(ssh_client, blockdevice="/dev/xvdt", vgname="vg2",
                 lvname="var_pulp", mountpoint=pulp_mountpoint)
def run(self, params={}):
    """Mount *device* at *directory* on an EC2 instance via a helper script
    and return the script's JSON result (empty dict on failure).

    NOTE(review): the mutable default ``params={}`` and the private key left
    behind at ./pk.pem are preserved as-is.
    """
    directory = params.get("directory")
    device = params.get("device")
    instance_id = params.get("instance_id")
    private_key = params.get("private_key")
    user = params.get("user")
    region = params.get("region")  # NOTE(review): read but never used here
    empty_json_output = {}
    # Create private key file
    f = open('./pk.pem', 'w')
    f.write(private_key)
    f.close()
    # Create command from user input
    command = 'sudo ./mount.sh ' + directory + ' ' + device
    try:
        # Connect to AWS instance
        reservations = self.connection.aws.get_all_instances(
            filters={'instance_id': instance_id})
        instance = reservations[0].instances[0]
        ssh_client = sshclient_from_instance(instance, './pk.pem', user_name=user)
        # Copy the mount.sh script to the instance and make it executable
        ssh_client.put_file("./komand_ec2_investigations/actions/mount.sh",
                            "./mount.sh")
        ssh_client.run('chmod +x mount.sh')
        # Execute the command and return the standard output
        status, stdout, stderr = ssh_client.run(command)
        # Remove script after running
        ssh_client.run('rm ./mount.sh')
        # mount.sh prints a numeric sentinel on failure, JSON on success.
        if stdout.decode("utf-8").rstrip() == "0":
            result = empty_json_output
            self.logger.error(
                "Unable to mount device: %s. Verify volume is attached",
                device)
        elif stdout.decode("utf-8").rstrip() == "1":
            result = json.loads(
                '{"directory": "%s", "status": "Directory already mounted"}'
                % directory)
            self.logger.info(
                "Unable to mount directory: %s. Directory already mounted",
                directory)
        elif stdout.decode("utf-8").rstrip() == "2":
            result = empty_json_output
            self.logger.error(
                "Unable to mount directory: %s. Invalid directory",
                directory)
        else:
            result = json.loads(stdout.decode("utf-8").rstrip())
    except Exception:
        self.logger.error(
            "No address associated with hostname %s. Verify instance is running and credentials are valid",
            instance_id)
        raise
    return result
import boto.ec2
import os
import subprocess
from boto.manage.cmdshell import sshclient_from_instance

# BUG FIX: 'ap-south-1a' is an availability zone, not a region;
# connect_to_region() returns None for an unknown region name, so the very
# next call would fail with an AttributeError. The region is 'ap-south-1'.
conn = boto.ec2.connect_to_region('ap-south-1')
instance = conn.get_all_instances(['i-070f5d4ed732eb5d8'])[0].instances[0]
# Log in as ec2-user with the local key pair.
ssh_client = sshclient_from_instance(instance, 'sourav.pem', user_name='ec2-user')
# Each run() returns (status, stdout, stderr).
print(ssh_client.run('sudo yum install -y httpd'))
print(ssh_client.run('sudo service httpd start'))
print(ssh_client.run('sudo pip install boto3'))
print(ssh_client.run('cat > part22.py'))
print(ssh_client.run('python part22.py'))
print(ssh_client.run('sudo mv test.html /var/www/html'))
print(ssh_client.run('sudo systemctl start httpd'))
print("done")
def restart_service(instance): ssh_client = sshclient_from_instance(instance, '/home/robert/relwellnlp.pem', user_name='ubuntu') ssh_client.known_hosts = None commands = ['sudo sv stop parser_daemon', 'sudo killall java', 'sudo sv start parser_daemon', 'sudo tail /var/log/runit/parser_poller/current'] results = map(lambda x: ssh_client.run(x), commands) print results
# Record ids and public DNS names of the two freshly launched instances.
ec2_instance1_id = ec2_instance1.id
ec2_instance2_id = ec2_instance2.id
ec2_instance1_dns = ec2_instance1.public_dns_name
ec2_instance2_dns = ec2_instance2.public_dns_name
# Attach the prepared security group to both instances.
# https://groups.google.com/forum/#!topic/boto-users/j_CfsT-o19U
ec2_connect.modify_instance_attribute(ec2_instance1_id, "groupSet", [sec_group_id])
ec2_connect.modify_instance_attribute(ec2_instance2_id, "groupSet", [sec_group_id])
# Retry SSH every 5 seconds until instance 1 accepts the connection, then
# install and enable the web-server stack on it.
bool_connection = False
while bool_connection == False:
    print "Attempt to connect Instance 1..."
    try:
        ssh_connect = sshclient_from_instance(
            instance=ec2_instance1,
            ssh_key_file=KEY_DIR + KEY_NAME,
            user_name=DEFAULT_USER
        )
    except Exception as e:
        print "Alert: {}.\n".format(e.message)
        print "Waiting for SSH service...",
        time.sleep(5)  # Wait for SSH service
    else:
        print "Connection to instance1 is successful\n"
        time.sleep(2)
        # Disable requiretty so the subsequent sudo commands can run
        # without an interactive terminal.
        ssh_connect.run_pty("sudo sed -i 's/requiretty/!requiretty/' /etc/sudoers")
        time.sleep(4)
        print ssh_connect.run(
            'sudo yum update -y; sudo yum groupinstall -y "Web Server" "PHP Support"; sudo yum install -y php-mysql php-xml php-mbstring php-gd; sudo service httpd start; sudo chkconfig httpd on'
        )
        bool_connection = True
print "\n\n ********************* Web Server Installation completed *********************\n\n"
def install_packages(instance, path_to_key, install_casa=True, install_miniconda=False, casa_version="4.3", user_name='ubuntu', debug=False, verbose=True):
    '''
    Install packages in a running instance. Requires paramiko for the SSH
    connection.

    Parameters
    ----------
    instance : Running instance object
        A running instance.
    path_to_key : str
        Path to the SSH key attached to the instance.
    install_casa : bool, optional
        Install CASA on the VM.
    install_miniconda : bool, optional
        Installs a miniconda environment on the VM. Needed to run the
        web-server.
    casa_version : str {4.3}, optional
        Version of CASA to install. Currently only 4.3 is supported.
    user_name : str, optional
        SSH login user on the VM.
    debug : bool, optional
        Return an interactive shell to test connection before attempting
        installation.
    verbose : bool, optional
        Print command, status, stdout and stderr for every step.

    Raises
    ------
    TypeError
        If an unsupported CASA version is requested.
    Exception
        If any remote command exits with a non-zero status.
    '''
    if casa_version != "4.3":
        raise TypeError("Only CASA 4.3 is currently supported.")
    # The submodule requires some fiddling around, as it is setup to use SSH
    # keys. The extra lines allow use of https instead.
    # http://stackoverflow.com/questions/15674064/github-submodule-access-rights-travis-ci
    run_script = ["sudo apt-get update",
                  "sudo apt-get -y install git",
                  "git clone https://github.com/Astroua/aws_controller.git",
                  "sed -i 's/[email protected]:/https:\/\/github.com\//' $HOME/aws_controller/.gitmodules",
                  "sed -i 's/[email protected]:/https:\/\/github.com\//' $HOME/aws_controller/.git/config",
                  "git -C $HOME/aws_controller submodule update --init --recursive --force",
                  "sh $HOME/aws_controller/casa-deploy/general_install.sh"]
    if install_casa:
        run_script.append("sh $HOME/aws_controller/full_casa_install.sh "+ str(casa_version))
    if install_miniconda:
        run_script.append("sh $HOME/aws_controller/casa-deploy/install_miniconda.sh")
        run_script.append("$HOME/miniconda/bin/conda update --yes conda")
        # NOTE(review): indentation reconstructed from a collapsed source
        # line — the aegean install may belong outside this branch; confirm.
        run_script.append("sh $HOME/aws_controller/casa-deploy/install_aegean.sh")
    # Start-up the SSH connection
    ssh_shell = sshclient_from_instance(instance, path_to_key, user_name=user_name)
    if debug:
        ssh_shell.shell()
    # Run the install steps in order; abort on the first failure.
    for cmd in run_script:
        status, stdout, stderr = ssh_shell.run(cmd)
        if verbose:
            print("Command: " + cmd)
            print("Status: " + str(status))
            print("Out: " + str(stdout))
            print("Error: " + str(stderr))
        if status != 0:
            print(stderr)
            raise Exception("Failed on " + cmd)
            break  # NOTE(review): unreachable after raise; preserved as-is
import boto.ec2
from boto.manage.cmdshell import sshclient_from_instance
import time
import praw

# Connect to your region of choice
conn = boto.ec2.connect_to_region('us-east-1')
print(conn)
# Find the instance object related to my instanceId
instance = conn.get_all_instances(['i-3dc50e2a'])[0].instances[0]
print(instance)
# Create an SSH client for our instance
# key_path is the path to the SSH private key associated with instance
# user_name is the user to login as on the instance (e.g. ubuntu, ec2-user, etc.)
ssh_client = sshclient_from_instance(instance, 'birdman.pem', user_name='ubuntu')
# Authenticate against reddit (credentials redacted in this source).
reddit = praw.Reddit(
    user_agent="Python/TensorFlow: Seq2Seq Chat (by /u/yadayada1212)",
    client_id='M9qNqXnuHbDs8w',
    client_secret='UL0odj8bpIft91claCnXemPkFTI',
    username='******',
    password='******')
# replace '' with text, which will represent the seq2seq data
subreddit = reddit.subreddit('BlueLobster')
op = reddit.redditor('yadayada1212')
# NOTE(review): the body of this loop is truncated in this source; only its
# leading comments survive below — the remote seq2seq invocation is missing.
for comment in subreddit.stream.comments():
    if comment.author != op:
        # Run the command. Returns a tuple consisting of:
        # The integer status of the command
import boto import boto.ec2 import paramiko from boto.manage.cmdshell import sshclient_from_instance access_key = '' secret_access_key = '' conn = boto.ec2.connect_to_region('us-east-1', aws_access_key_id=access_key, aws_secret_access_key=secret_access_key) instance = conn.get_all_instances(['i-0b51be599192e1780'])[0].instances[0] ssh_client = sshclient_from_instance( instance, 'C:\Program Files (x86)\OpenSSH\etc\\November2016.pem', user_name='ec2-user') print "Downloading file..." ssh_client.get_file("/home/ec2-user/November2016.pem", r"C:/Program Files (x86)/OpenSSH/etc/November2016.pem")
def get_ssh_client_list():
    """Return one SSH client per crawler instance, authenticated as ec2-user."""
    clients = []
    for crawler in getCrawlerInstances():
        clients.append(sshclient_from_instance(crawler,
                                               host_key_file='/home/ec2-user/.ssh/known_hosts',
                                               ssh_key_file=keys.aws_pem,
                                               user_name='ec2-user'))
    return clients
def copy_file_to_web_server(local_filepath, web_server_filepath):
    """Upload a local file to the web-server instance over SSH."""
    client = sshclient_from_instance(getWebServerInstance(),
                                     host_key_file='/home/ec2-user/.ssh/known_hosts',
                                     ssh_key_file=keys.aws_pem,
                                     user_name='ec2-user')
    client.put_file(local_filepath, web_server_filepath)
# NOTE(review): these first lines are the tail of a confirmation helper
# (presumably ``do_this_one``) whose definition is truncated before this view.
if res == 'y':
    return True
elif res == 'n':
    return False


if __name__ == '__main__':
    conn = boto.ec2.connect_to_region('us-east-1')
    reservations = conn.get_all_instances()
    for reservation in reservations:
        for instance in reservation.instances:
            if instance.state != 'running':
                print "Skipping {} instance {}".format(instance.state, instance.id)
                continue
            # Interactively confirm each instance before touching it.
            if not do_this_one(instance.id):
                continue
            print "Connecting to", instance
            ssh_client = sshclient_from_instance(instance, "ec2-mattgodbolt.pem", user_name='ubuntu')
            print "Connected. Running command"
            # Re-provision the compiler-explorer image with fresh S3 creds.
            status, stdout, stderr = ssh_client.run(
                'sudo bash -c "export S3_ACCESS_KEY={}; export S3_SECRET_KEY={}; cd /compiler-explorer-image; git pull && bash setup.sh"'
                .format(S3_ACCESS_KEY, S3_SECRET_KEY))
            print "Status", status
            print "Stdout", stdout
            print "Stderr", stderr
            print "Done"
import boto.ec2
from boto.manage.cmdshell import sshclient_from_instance

# BUG FIX: the original used boto3.resource('ec2'), which has no
# get_all_instances() method, and then passed a bare instance-id STRING to
# sshclient_from_instance(), which requires a boto Instance object.
# sshclient_from_instance() belongs to boto (v2), so use a boto connection
# and hand it the resolved instance object.
# NOTE(review): region assumed to be 'ap-south-1' to match the sibling
# script that uses the same key pair — confirm.
conn = boto.ec2.connect_to_region('ap-south-1')
instance = conn.get_all_instances(['i-064c1aca25ee9138a'])[0].instances[0]
print(instance)
ssh_client = sshclient_from_instance(instance, 'sourav.pem', user_name='ec2-user')
print(ssh_client.run('sudo yum install -y httpd'))
def export_researchers(do_all=False, job_type="normal", filename=None, view=None):
    """Dump a researcher-export database view to CSV on a remote EC2 host,
    gzip it, and push both the .gz and plain CSV to the oadoi-export S3
    bucket, logging the console/public URLs at the end.

    :param do_all: export the full queue view (``*_full.csv``)
    :param job_type: any truthy value selects the "hybrid" export view
    :param filename: optional base name for the generated CSV
    :param view: database view to copy from; defaults depend on the flags
    """
    logger.info(u"logging in to aws")
    conn = boto.ec2.connect_to_region('us-west-2')
    # NOTE(review): blindly takes the first instance of the first reservation.
    instance = conn.get_all_instances()[0].instances[0]
    ssh_client = sshclient_from_instance(instance, "data/key.pem", user_name="ec2-user")
    logger.info(u"log in done")
    # Derive the output base name from the given filename, if any.
    if filename:
        base_filename = filename.rsplit("/")[-1]
        base_filename = base_filename.split(".")[0]
    else:
        base_filename = "export_queue"
    # Pick the view and output name; the psql \copy runs on the remote host.
    if do_all:
        filename = base_filename + "_full.csv"
        if not view:
            view = "export_queue"
        command = """psql {}?ssl=true -c "\copy (select * from {} e) to '{}' WITH (FORMAT CSV, HEADER);" """.format(
            os.getenv("DATABASE_URL"), view, filename)
    elif job_type:
        filename = base_filename + "_hybrid.csv"
        if not view:
            view = "export_queue_with_hybrid"
        command = """psql {}?ssl=true -c "\copy (select * from {}) to '{}' WITH (FORMAT CSV, HEADER);" """.format(
            os.getenv("DATABASE_URL"), view, filename)
    else:
        filename = base_filename + ".csv"
        if not view:
            view = "export_full"
        command = """psql {}?ssl=true -c "\copy (select * from {}) to '{}' WITH (FORMAT CSV, HEADER);" """.format(
            os.getenv("DATABASE_URL"), view, filename)
    logger.info(command)
    status, stdout, stderr = ssh_client.run(command)
    logger.info(u"{} {} {}".format(status, stdout, stderr))
    command = """gzip -c {} > {}.gz;""".format(filename, filename)
    logger.info(command)
    status, stdout, stderr = ssh_client.run(command)
    logger.info(u"{} {} {}".format(status, stdout, stderr))
    command = """aws s3 cp {}.gz s3://oadoi-export/{}.gz --acl public-read;""".format(
        filename, filename)
    logger.info(command)
    status, stdout, stderr = ssh_client.run(command)
    logger.info(u"{} {} {}".format(status, stdout, stderr))
    # also do the non .gz one because easier
    command = """aws s3 cp {} s3://oadoi-export/{} --acl public-read;""".format(
        filename, filename)
    logger.info(command)
    status, stdout, stderr = ssh_client.run(command)
    logger.info(u"{} {} {}".format(status, stdout, stderr))
    logger.info(
        u"now go to *** https://console.aws.amazon.com/s3/object/oadoi-export/{}.gz?region=us-east-1&tab=overview ***"
        .format(filename))
    logger.info(
        u"public link is at *** https://s3-us-west-2.amazonaws.com/oadoi-export/{}.gz ***"
        .format(filename))
    conn.close()
def export_clarivate(do_all=False, job_type="normal", filename=None, view=None):
    """Export changed DOIs and hand them to Clarivate via S3.

    Connects over SSH to the first EC2 instance in us-west-2, runs a psql
    ``\\copy`` of `view` into a timestamped CSV, gzips it, and uploads the .gz
    plus an md5 ".DONE" marker to s3://oadoi-for-clarivate/, stamping each
    object with a "modifiedtimestamp" metadata field.

    NOTE(review): `do_all` and `job_type` are unused here, and the `filename`
    argument is always replaced by the timestamped name -- confirm the
    signature is kept only for symmetry with the other export functions.

    :param view: database view to copy from (default "export_main_changed")
    :return: None (results are uploaded to S3 and logged)
    """
    # ssh -i /Users/hpiwowar/Dropbox/ti/certificates/aws-data-export.pem [email protected]
    # aws s3 cp test.txt s3://mpr-ims-harvestor/mpr-ims-dev/harvestor_staging_bigBatch/OA/test.txt

    # connect to our bucket
    conn = boto.ec2.connect_to_region('us-west-2')
    instance = conn.get_all_instances()[0].instances[0]
    ssh_client = sshclient_from_instance(instance, "data/key.pem", user_name="ec2-user")

    # to connect to clarivate's bucket
    # clarivate_conn = boto.ec2.connect_to_region('us-east-2')
    # clarivate_instance = clarivate_conn.get_all_instances()[0].instances[0]
    # clarivate_ssh_client = sshclient_from_instance(clarivate_instance, "/Users/hpiwowar/Dropbox/ti/certificates/aws-data-export.pem", user_name="ec2-user")
    logger.info(u"log in done")

    def run_logged(command):
        # every remote step follows the same log / run / log pattern;
        # stdout is returned for the steps that need to capture it
        logger.info(command)
        status, stdout, stderr = ssh_client.run(command)
        logger.info(u"{} {} {}".format(status, stdout, stderr))
        return stdout

    now_timestamp = datetime.datetime.utcnow().isoformat()[0:19].replace(
        ":", "")
    filename = "all_dois_{}.csv".format(now_timestamp)
    if not view:
        view = "export_main_changed"

    # Raw string so psql receives \copy literally; the command bytes are
    # unchanged -- this only fixes the invalid "\c" escape warning.
    run_logged(r"""psql {}?ssl=true -c "\copy (select * from {}) to '{}' WITH (FORMAT CSV, HEADER);" """.format(
        os.getenv("DATABASE_URL"), view, filename))
    run_logged("""gzip -c {} > {}.gz;""".format(filename, filename))

    # capture the .gz modification time for the S3 metadata stamp
    gz_modified = run_logged("""date -r {}.gz;""".format(filename)).strip()

    # previous destination kept for reference:
    # aws s3 cp {}.gz s3://mpr-ims-harvestor/mpr-ims-dev/harvestor_staging_bigBatch/OA/{}.gz ...
    run_logged("""aws s3 cp {}.gz s3://oadoi-for-clarivate/{}.gz --acl public-read --metadata "modifiedtimestamp='{}'";""".format(
        filename, filename, gz_modified))

    # also make a .DONE file
    # how to calculate a checksum http://www.heatware.net/linux-unix/how-to-create-md5-checksums-and-validate-a-file-in-linux/
    run_logged("""md5sum {}.gz > {}.gz.DONE;""".format(filename, filename))

    # NOTE(review): this reads the mtime of the .gz again, not the .gz.DONE --
    # confirm whether the DONE marker should carry its own timestamp.
    gz_done_modified = run_logged("""date -r {}.gz;""".format(filename)).strip()

    # copy up the .DONE file
    run_logged("""aws s3 cp {}.gz.DONE s3://oadoi-for-clarivate/{}.gz.DONE --acl public-read --metadata "modifiedtimestamp='{}'";""".format(
        filename, filename, gz_done_modified))

    logger.info(
        u"public link is at *** https://s3-us-west-2.amazonaws.com/oadoi-for-clarivate/test/{}.gz ***"
        .format(filename))
    conn.close()
hostsAdd = createHostAdditions( instances ) # Create an SSH client for our instance # key_path is the path to the SSH private key associated with instance # user_name is the user to login as on the instance (e.g. ubuntu, ec2-user, etc.) key_path = os.environ["AWS_SSH_PRIVATE_KEY"] private_security_group_name = os.environ["AWS_SECURITY_GROUP_PRIVATE"] group = ec2conn.get_all_security_groups( private_security_group_name )[0] jini_locators = createJiniLocatorsSubstitution() print "JINI_LOCATORS = " + jini_locators i = 1 for host in bigdataHosts: ssh_client = sshclient_from_instance( host, key_path, user_name='ubuntu' ) # ssh_client.set_missing_host_key_policy(paramiko.AutoAddPolicy()) # Run the command. Returns a tuple consisting of: # The integer status of the command # A string containing the output of the command # A string containing the stderr output of the command status, stdin, stderr = ssh_client.run( "sudo sh -c 'echo \"" + hostsAdd + "\" >> /etc/hosts'" ) status, stdin, stderr = ssh_client.run( "sudo sh -c 'echo " + str(i) + " > /var/lib/zookeeper/myid'" ) status, stdin, stderr = ssh_client.run( createZookeeperSubstitution( "1", bigdataA, hostMap[ bigdataA ] ) ) status, stdin, stderr = ssh_client.run( createZookeeperSubstitution( "2", bigdataB, hostMap[ bigdataB ] ) ) status, stdin, stderr = ssh_client.run( createZookeeperSubstitution( "3", bigdataC, hostMap[ bigdataC ] ) ) status, stdin, stderr = ssh_client.run( jini_locators ) hostAddress = host.__dict__['private_ip_address'] + "/32"
# Interactive EC2 bootstrap fragment: asks for the .pem location and remote
# user, then provisions a virtualenv + nginx/gunicorn on the instance.
# NOTE(review): `instance_id`, `pem_name` and `IP` are defined earlier in this
# script, outside this fragment.
pem_file_location = input(
    "Please ENTER the .pem file location (don't include name of file)\n")
conn = boto.ec2.connect_to_region('us-west-2')
instance = conn.get_all_instances([instance_id])[0].instances[0]
# SSH refuses keys with open permissions; lock the file down first.
os.system('chmod 400 %s/%s.pem' % (pem_file_location, pem_name))
server_localuser = input(
    "ENTER THE USER FOR SERVER (FOR EXAMPLE : type 'ubuntu' for UBUNTU machine)\n"
)
Python_version = input(
    "ENTER THE PYTHON VERSION FOR VIRTUALENV .FOR EXAMPLE: '2' or '3' \n")
Virtualenv_name = input("ENTER VIRTAULENV NAME\n")
ssh_client = sshclient_from_instance(instance,
                                     pem_file_location + "/" + pem_name + ".pem",
                                     user_name=server_localuser)
# Install pip/virtualenv and create the env with the requested Python version.
status, stdout, stderr = ssh_client.run(
    'sudo apt update && sudo apt install -y python3-pip && sudo apt install -y virtualenv && virtualenv -p python%s %s'
    % (Python_version, Virtualenv_name))
# Activate the env, then install nginx and gunicorn.
status, stdout, stderr = ssh_client.run(
    "source %s/bin/activate && sudo apt install -y nginx && pip install gunicorn"
    % (Virtualenv_name))
# Copy the git-setup helper to the instance and run it there.
os.system('scp -i %s/%s.pem %s/ssh-git.py ubuntu@%s:/home/ubuntu'
          % (pem_file_location, pem_name, pem_file_location, IP))
status, stdout, stderr = ssh_client.run("python3 ssh-git.py")
print("OUTPUT : \n", stdout)
print("ERROR MESSAGE :\n", stderr)
# Locate the running master instance of the cluster and verify over SSH
# whether the keypair file exists on it.
import boto3
from boto.manage.cmdshell import sshclient_from_instance

from csparkbench.config import *

ec2 = boto3.resource('ec2', region_name=REGION)
instances = ec2.instances.filter(
    Filters=[{'Name': 'instance-state-name', 'Values': ['running']},
             {'Name': 'tag:ClusterId', 'Values': [CLUSTER_ID]}
             ])

# Materialize the collection once: each iteration of a boto3 collection
# re-issues the DescribeInstances API call, so the original triple
# list(instances) performed three round-trips.
running_instances = list(instances)

print("Instance Found: " + str(len(running_instances)))
if len(running_instances) == 0:
    print("No instances running")
    exit(1)

# First running instance is treated as the master.
master_instance = running_instances[0]
ssh_client = sshclient_from_instance(master_instance, KEYPAIR_PATH, user_name='ubuntu')

# '[ ! -e path ]; echo $?' prints 1 when the file exists, 0 when it does not.
path = DATA_AMI[REGION]["keypair"] + ".pem"
status = ssh_client.run('[ ! -e %s ]; echo $?' % path)
print(status)
# Fragment (Python 2): instances 1/2 were launched earlier; record ids/DNS,
# attach the security group, then poll until SSH is reachable and bootstrap a
# web server on instance 1.
ec2_instance2_id = ec2_instance2.id
ec2_instance1_dns = ec2_instance1.public_dns_name
ec2_instance2_dns = ec2_instance2.public_dns_name
# https://groups.google.com/forum/#!topic/boto-users/j_CfsT-o19U
ec2_connect.modify_instance_attribute(ec2_instance1_id, "groupSet", [sec_group_id])
ec2_connect.modify_instance_attribute(ec2_instance2_id, "groupSet", [sec_group_id])

bool_connection = False
while bool_connection == False:
    print "Attempt to connect Instance 1..."
    try:
        ssh_connect = sshclient_from_instance(instance=ec2_instance1,
                                              ssh_key_file=KEY_DIR + KEY_NAME,
                                              user_name=DEFAULT_USER)
    except Exception as e:
        # SSH not up yet -- report and retry every 5 seconds.
        print "Alert: {}.\n".format(e.message)
        print "Waiting for SSH service...",
        time.sleep(5)  # Wait for SSH service
    else:
        print "Connection to instance1 is successful\n"
        time.sleep(2)
        # Relax the sudoers tty requirement so non-interactive sudo works.
        ssh_connect.run_pty(
            "sudo sed -i \'s/requiretty/!requiretty/\' /etc/sudoers")
        time.sleep(4)
        # Install and start Apache + PHP, enable on boot.
        print ssh_connect.run(
            "sudo yum update -y; sudo yum groupinstall -y \"Web Server\" \"PHP Support\"; sudo yum install -y php-mysql php-xml php-mbstring php-gd; sudo service httpd start; sudo chkconfig httpd on"
        )
        bool_connection = True
def export(do_all=False, job_type="normal", filename=None, view=None):
    """Upload an existing DOI export CSV from the AWS export box to S3.

    Connects over SSH to the first EC2 instance in us-west-2, gzips the CSV,
    uploads the .gz, the plain CSV and an md5 ".DONE" marker to
    s3://oadoi-export/full/, and logs the public links.

    NOTE(review): `do_all` and `job_type` are unused; `filename` and `view`
    are overwritten below with hard-coded values -- looks like leftover debug
    state, confirm before relying on the parameters.
    """
    logger.info(u"logging in to aws")
    conn = boto.ec2.connect_to_region('us-west-2')
    instance = conn.get_all_instances()[0].instances[0]
    ssh_client = sshclient_from_instance(instance, "data/key.pem", user_name="ec2-user")
    logger.info(u"log in done")

    now_timestamp = datetime.datetime.utcnow().isoformat()[0:19].replace(
        "-", "").replace(":", "")
    filename = "all_dois_{}.csv".format(now_timestamp)
    # NOTE(review): the timestamped name above is immediately clobbered by this
    # hard-coded file, and the psql \copy step below is commented out -- as
    # written, this run re-uploads a previously generated export.
    filename = "all_dois_20170812T210215.csv"
    view = "export_main_for_researchers"

    # command = """psql {}?ssl=true -c "\copy (select * from {}) to '{}' WITH (FORMAT CSV, HEADER);" """.format(
    #     os.getenv("DATABASE_URL"), view, filename)
    # logger.info(command)
    # status, stdout, stderr = ssh_client.run(command)
    # logger.info(u"{} {} {}".format(status, stdout, stderr))

    # compress the CSV on the export box
    command = """gzip -c {} > {}.gz; date;""".format(filename, filename)
    logger.info(command)
    status, stdout, stderr = ssh_client.run(command)
    logger.info(u"{} {} {}".format(status, stdout, stderr))

    # upload the compressed export
    command = """aws s3 cp {}.gz s3://oadoi-export/full/{}.gz --acl public-read; date; """.format(
        filename, filename)
    logger.info(command)
    status, stdout, stderr = ssh_client.run(command)
    logger.info(u"{} {} {}".format(status, stdout, stderr))

    # also do the non .gz one because easier
    command = """aws s3 cp {} s3://oadoi-export/full/{} --acl public-read; date;""".format(
        filename, filename)
    logger.info(command)
    status, stdout, stderr = ssh_client.run(command)
    logger.info(u"{} {} {}".format(status, stdout, stderr))

    # also make a .DONE file
    # how to calculate a checksum http://www.heatware.net/linux-unix/how-to-create-md5-checksums-and-validate-a-file-in-linux/
    command = """md5sum {}.gz > {}.gz.DONE; date;""".format(filename, filename)
    logger.info(command)
    status, stdout, stderr = ssh_client.run(command)
    logger.info(u"{} {} {}".format(status, stdout, stderr))

    # copy up the .DONE file
    command = """aws s3 cp {}.gz.DONE s3://oadoi-export/full/{}.gz.DONE --acl public-read; date;""".format(
        filename, filename)
    logger.info(command)
    status, stdout, stderr = ssh_client.run(command)
    logger.info(u"{} {} {}".format(status, stdout, stderr))

    logger.info(
        u"now go to *** https://console.aws.amazon.com/s3/object/oadoi-export/full/{}.gz?region=us-east-1&tab=overview ***"
        .format(filename))
    logger.info(
        u"public link is at *** https://s3-us-west-2.amazonaws.com/oadoi-export/full/{}.gz ***"
        .format(filename))
    conn.close()
############################################################################################################ import boto.ec2 from boto.manage.cmdshell import sshclient_from_instance # Connect to your region of choice conn = boto.ec2.connect_to_region('us-west-2') # Find the instance object related to my instanceId instance = conn.get_all_instances(['i-12345678'])[0] # Create an SSH client for our instance # key_path is the path to the SSH private key associated with instance # user_name is the user to login as on the instance (e.g. ubuntu, ec2-user, etc.) ssh_client = sshclient_from_instance(instance, key_path='<path to SSH keyfile>', user_name='ec2-user') # Run the command. Returns a tuple consisting of: # The integer status of the command # A string containing the output of the command # A string containing the stderr output of the command status, stdin, stderr = ssh_client.run('ls -al') ############################################################################################################ ##For future reference, this is how to start a stopped instance: instance = conn.get_all_instances(instance_ids=['instance_id']) print instance[0].instances[0].start()
def get_ssh_client():
    """Return an SSH client connected to the crawler instance as ec2-user.

    Uses the shared known_hosts file on the box and the PEM key from `keys`.
    """
    crawler = getCrawlerInstance()
    return sshclient_from_instance(
        crawler,
        host_key_file='/home/ec2-user/.ssh/known_hosts',
        ssh_key_file=keys.aws_pem,
        user_name='ec2-user')
def deploy():
    """Push the scraper code and config to every crawler instance, then deploy
    the scrapy project to each via scrapyd-deploy.

    NOTE(review): `ret` is never reassigned after the failure-collection code
    was commented out, so the return value is always None.
    """
    ret = None
    for i in ec2.getCrawlerInstances():
        if not i.ip_address:
            # instance has no public address -- cannot reach it, skip
            continue
        print "[%s] %s" % (i.id, i.ip_address)
        ssh_client = sshclient_from_instance(ec2.getInstanceFromInstanceName(i.id), host_key_file = '/home/ec2-user/.ssh/known_hosts', ssh_key_file=keys.aws_pem,user_name='ec2-user')
        #ssh_client = sshclient_from_instance(ec2.getInstanceFromInstanceName(i.id), host_key_file = '/home/ec2-user/.ssh/known_hosts', ssh_key_file="",user_name='ec2-user')
        ssh_client.put_file('/home/ec2-user/bblio/scraper/scrapyd.conf','/home/ec2-user/scrapyd.conf')
        home_dir = '/home/ec2-user/bblio/'
        # files to mirror onto the crawler box (".crawler" suffix marks
        # crawler-specific variants, stripped on the remote side below)
        copyList = []
        copyList.append(home_dir + 'build/search/models.py')
        copyList.append(home_dir + 'build/search/__init__.py')
        copyList.append(home_dir + 'build/Build/__init__.py')
        copyList.append(home_dir + 'build/Build/settings.py.crawler')
        copyList.append(home_dir + 'build/Build/myScript.py.crawler')
        copyList.append(home_dir + 'build/manage.py')
        copyList.append(home_dir + 'build/__init__.py')
        copyList.append(home_dir + 'aws/ec2.py')
        copyList.append(home_dir + 'aws/keys.py')
        copyList.append(home_dir + 'aws/key.pem')
        copyList.append(home_dir + 'aws/__init__.py')
        copyList.append(home_dir + 'config_file.py')
        copyList.append(home_dir + '__init__.py')
        # collect every parent directory that must exist remotely, walking up
        # from each file toward home_dir
        dirList = []
        for c in copyList:
            c_dir = os.path.dirname(c)
            prev_dir = ''
            # NOTE(review): `c_dir not in home_dir` is a substring test, not a
            # prefix test -- appears to work because all paths live under
            # home_dir, but confirm before reusing elsewhere.
            while c_dir != prev_dir and c_dir not in home_dir:
                if c_dir not in dirList:
                    dirList.append(c_dir)
                prev_dir = c_dir
                c_dir = os.path.dirname(c_dir)
        dirList.append(home_dir)
        # create shallow directories before deep ones (sort by path length)
        dirList.sort(lambda x,y: cmp(len(x), len(y)))
        for d in dirList:
            print('[dir][%s] %s' % (ssh_client.server.instance_id, d))
            ssh_client.run('mkdir %s' % d)
        for c in copyList:
            print('[file][%s] %s' % (ssh_client.server.instance_id, c))
            ssh_client.put_file(c,c.replace('.crawler',''))
        # point the local scrapy.cfg at this instance's scrapyd endpoint
        with open("/home/ec2-user/bblio/scraper/deployable/scrapy.cfg", "w") as f:
            f.write(
"""
[settings]
default = deployable.settings

[deploy]
project = deployable\n
"""
            )
            f.write("url = http://")
            f.write(i.ip_address)
            f.write(":6800")
        print i.ip_address
        # run scrapyd-deploy, stream its output, and capture the JSON status line
        p = Popen(['scrapyd-deploy'],stdout=PIPE,shell=True,cwd='/home/ec2-user/bblio/scraper/deployable')
        j = None
        while True:
            out = p.stdout.read()
            if out == '' and p.poll() != None:
                break
            if out != '':
                if '{' in out:
                    j = out
                    j = json.loads(out)
                sys.stdout.write(out)
                sys.stdout.flush()
        #if j['status'] != 'ok':
            #ret = ret + str(i.ip_address) + ' failed\n'
    return ret
def do_this_one(name):
    """Prompt until the user answers 'y' or 'n'; True for 'y', False for 'n'.

    Any other answer silently re-prompts.
    """
    while True:
        res = raw_input("Update {}? ".format(name))
        if res == 'y':
            return True
        elif res == 'n':
            return False


if __name__ == '__main__':
    # Walk every instance in the region and interactively update each one.
    conn = boto.ec2.connect_to_region('us-east-1')
    reservations = conn.get_all_instances()
    for reservation in reservations:
        for instance in reservation.instances:
            if instance.state != 'running':
                print "Skipping {} instance {}".format(instance.state, instance.id)
                continue
            if not do_this_one(instance.id):
                continue
            print "Connecting to", instance
            ssh_client = sshclient_from_instance(instance, "ec2-mattgodbolt.pem", user_name='ubuntu')
            print "Connected. Running command"
            # pull the latest image and rerun setup with S3 credentials exported
            status, stdout, stderr = ssh_client.run('sudo bash -c "export S3_ACCESS_KEY={}; export S3_SECRET_KEY={}; cd /gcc-explorer-image; git pull && bash setup.sh"'.format(
                S3_ACCESS_KEY, S3_SECRET_KEY))
            print "Status", status
            print "Stdout", stdout
            print "Stderr", stderr
    print "Done"
# Fragment of a deploy script (Python 2): upload the local `upload/` directory
# to the new instance over scp, then run the server-side deploy script via SSH.
# NOTE(review): `ipaddr`, `instance` and `new_str` come from earlier in the script.
print 'Uploading files...\n'
time.sleep(90)  # give the freshly launched instance time to boot before scp
hostaddr = 'ubuntu@' + ipaddr
cmd = 'scp -oStrictHostKeyChecking=no -ri kp1.pem upload/* ' + hostaddr + ':~/'
p = subprocess.Popen(cmd, shell=True)
p.wait()
print '\nUploading files complete!'
time.sleep(3)

## Run deploy script on server
print 'Running script on server...'
time.sleep(3)
ssh_client = sshclient_from_instance(instance, 'kp1.pem', user_name='ubuntu')
status, stdout, stderr = ssh_client.run('python server_deploy.py')
print 'Running script on server complete!'
time.sleep(3)
print '\nDeployment complete!\n'
time.sleep(3)
print 'IP: ' + ipaddr
# Reconstruct the public DNS name from the dashed IP fragment.
dns = 'http://ec2-' + new_str + '.compute-1.amazonaws.com:8080/'
print 'DNS: ' + dns
## End