def threadable_remote_upload_tar(remote_machines):
  """
  <Purpose>
    Uploads the deploy.tar to each machine before running anything. Machines
    that timeout are added to the unreachable_hosts list in the dictionary.

  <Arguments>
    remote_machines:
      list of tuples with (user, ip) IPs that we have to cleanup.

  <Exceptions>
    None.

  <Side Effects>
    Temporarily locks thread_communications dict which is used by other
    threads trying to upload (if they run into an error).

  <Returns>
    None.
  """
  # Assume single element if it's not a list, and wrap it so the loop below
  # always iterates over a list of (user, ip) tuples.
  if type(remote_machines) != type([]):
    remote_machines = [remote_machines]

  # for every machine in our list...
  for machine_tuple in remote_machines:
    # split up the tuple
    username = machine_tuple[0]
    machine = machine_tuple[1]

    # FIX: corrected log-message typo 'Attemping' -> 'Attempting'.
    deploy_logging.log('Setup', 'Attempting tar file upload via scp on ' + machine)
    scp_errcode, scp_stdout, scp_stderr = upload_tar(username, str(machine))

    out, err = deploy_logging.format_stdout_and_err(scp_stdout, scp_stderr)

    # check the error codes: scp exits 0 on success, 1 on connection trouble;
    # anything else is treated as an unknown failure.  Failed hosts are
    # recorded so later stages skip them.
    if str(scp_errcode) == '0':
      deploy_logging.log('Setup', ' scp file upload complete on ' + machine)
    elif str(scp_errcode) == '1':
      deploy_logging.logerror('Could not establish a connection with ' +
          machine + ' (' + err + ')')
      deploy_threading.add_unreachable_host((username, machine))
    else:
      deploy_logging.logerror('scp returned unknown error code ' +
          str(scp_errcode) + ' (' + err + ')')
      deploy_threading.add_unreachable_host((username, machine))
def threadable_remote_upload_tar(remote_machines):
  """
  <Purpose>
    Uploads the deploy.tar to each machine before running anything. Machines
    that timeout are added to the unreachable_hosts list in the dictionary.

    NOTE(review): this is a duplicate of an identical definition earlier in
    the file; the later definition wins at import time.  Consider removing
    one of the two copies.

  <Arguments>
    remote_machines:
      list of tuples with (user, ip) IPs that we have to cleanup.

  <Exceptions>
    None.

  <Side Effects>
    Temporarily locks thread_communications dict which is used by other
    threads trying to upload (if they run into an error).

  <Returns>
    None.
  """
  # Assume single element if it's not a list, and wrap it so the loop below
  # always iterates over a list of (user, ip) tuples.
  if type(remote_machines) != type([]):
    remote_machines = [remote_machines]

  # for every machine in our list...
  for machine_tuple in remote_machines:
    # split up the tuple
    username = machine_tuple[0]
    machine = machine_tuple[1]

    # FIX: corrected log-message typo 'Attemping' -> 'Attempting'.
    deploy_logging.log('Setup', 'Attempting tar file upload via scp on ' + machine)
    scp_errcode, scp_stdout, scp_stderr = upload_tar(username, str(machine))

    out, err = deploy_logging.format_stdout_and_err(scp_stdout, scp_stderr)

    # check the error codes: scp exits 0 on success, 1 on connection trouble;
    # anything else is treated as an unknown failure.  Failed hosts are
    # recorded so later stages skip them.
    if str(scp_errcode) == '0':
      deploy_logging.log('Setup', ' scp file upload complete on ' + machine)
    elif str(scp_errcode) == '1':
      deploy_logging.logerror('Could not establish a connection with ' +
          machine + ' (' + err + ')')
      deploy_threading.add_unreachable_host((username, machine))
    else:
      deploy_logging.logerror('scp returned unknown error code ' +
          str(scp_errcode) + ' (' + err + ')')
      deploy_threading.add_unreachable_host((username, machine))
else: print "ERROR: Specified instructional machine filepath is"+\ " not a valid file ("+opt[1]+")" return else: print 'Invalid instructional machine path specified, not going to die.' return # print intro print_notification() # Execute the tar creation script out, err, returncode = shellexec2('python create_tar.py '+custom_script_name) # Just formatting the out and err from executing the shell script. out, err = deploy_logging.format_stdout_and_err(out, err) # print if not empty if out: print out if err: print err print deploy_logging.sep # if all went sucessfully.. if returncode == 0: # setup all the directories.. prep_local_dirs(keep) print "Entering upload and execution script... (this may take a while)"
def remote_upload_file(local_fn_path, user, remote_host, retry_on_refusal = 3,
    connect_timeout = default_connection_timeout):
  """
  <Purpose>
    This uses scp to upload a file to a remote computer.

  <Arguments>
    local_fn_path:
      Which file do we chuck to the remote computer?
    user:
      user to log in as
    remote_host:
      the ip/name of the machine we're connecting to.
    retry_on_refusal:
      Optional. Integer. Has number of times to retry the connection IF it
      was refused (built in to take care of not 'spamming' the remote server)
    connect_timeout:
      Optional. Integer. Time in seconds for ssh to timeout if no response
      was received.

  <Exceptions>
    Raises Exception if the local file does not exist.

  <Side Effects>
    None.

  <Returns>
    Tuple. (out, err, returncode)
      out: stdout from scp
      err: err from ssh
      returncode: scp's exit code
  """
  # check that local file exists.
  if not os.path.isfile(local_fn_path):
    deploy_logging.logerror('Problem with local file: it does not exist!')
    raise Exception('Please check calling method.')

  # BatchMode=yes keeps scp from prompting for a password;
  # StrictHostKeyChecking=no accepts hosts not yet in known_hosts.
  scp_proc_handle = subprocess.Popen('scp -o BatchMode=yes -o '+\
      'ConnectTimeout='+str(connect_timeout)+' -o StrictHostKeyChecking=no '+\
      ' '+local_fn_path+' '+user+"@"+remote_host+":", shell = True,
      stdout = subprocess.PIPE, stderr = subprocess.PIPE)

  scp_proc_pid = scp_proc_handle.pid

  # start thread to monitor timeouts (on another thread)
  deploy_threading.monitor_timeout(scp_proc_pid, int(connect_timeout),
      remote_host, user)

  # execute and block until done...
  out, err = scp_proc_handle.communicate('')
  returncode = scp_proc_handle.returncode

  # retry if conn. was refused?
  if retry_on_refusal:
    # check if we got a connection refused. if we did, could be cuz we're
    # spamming the server, so sleep and then try again
    didwesleep = sleep_on_conn_refused(out, err, retry_on_refusal, remote_host)
    # we slept, so call function again and try to execute
    if didwesleep:
      # run again, but this time decrement retry counter.
      # FIX: propagate the caller's connect_timeout on retries instead of
      # resetting it to default_connection_timeout.
      out, err, returncode = remote_upload_file(local_fn_path, user,
          remote_host, retry_on_refusal - 1,
          connect_timeout = connect_timeout)

  # format the string
  out, err = deploy_logging.format_stdout_and_err(out, err)

  return out, err, returncode
def remote_download_file(remote_fn_path, local_fn_path, user, remote_host,
    retry_on_refusal = 3, connect_timeout = default_connection_timeout):
  """
  <Purpose>
    This uses scp to download a file from a remote computer.

  <Arguments>
    remote_fn_path:
      The path to the file to download (remote file)
    local_fn_path:
      Where do we put it on this computer?
    user:
      user to log in as
    remote_host:
      the ip/name of the machine we're connecting to.
    retry_on_refusal:
      Optional. Integer. Has number of times to retry the connection IF it
      was refused (built in to take care of not 'spamming' the remote server)
    connect_timeout:
      Optional. Integer. Time in seconds for ssh to timeout if no response
      was received.

  <Exceptions>
    Raises Exception if the local destination directory does not exist.

  <Side Effects>
    None.

  <Returns>
    Tuple. (out, err, returncode)
      out: stdout from scp
      err: err from ssh
      returncode: scp's exit code
  """
  # local_fn_path will have the path + name of file
  # get the fn by doing some string math..
  dir_to_local_file, junk, localfn = local_fn_path.rpartition('/')

  # is the dir real?
  if not os.path.isdir(dir_to_local_file):
    deploy_logging.logerror('Local destination directory does not exist.')
    raise Exception('Please check calling method.')

  # the SCP handle used.  BatchMode=yes keeps scp from prompting for a
  # password; StrictHostKeyChecking=no accepts hosts not yet in known_hosts.
  scp_proc_handle = subprocess.Popen('scp -o BatchMode=yes -o '+\
      'ConnectTimeout='+str(connect_timeout)+' -o StrictHostKeyChecking=no '+\
      ' '+user+'@'+remote_host+':'+remote_fn_path+\
      ' '+local_fn_path, shell = True,
      stdout = subprocess.PIPE, stderr = subprocess.PIPE)

  # set the PID of the process so we can set a timeout later
  scp_proc_pid = scp_proc_handle.pid

  # start thread to monitor timeouts (on another thread)
  deploy_threading.monitor_timeout(scp_proc_pid, int(connect_timeout),
      remote_host, user)

  # execute
  out, err = scp_proc_handle.communicate('')
  returncode = scp_proc_handle.returncode

  # retry if conn. was refused?
  if retry_on_refusal:
    # check if we got a connection refused. if we did, could be cuz we're
    # spamming the server, so sleep and then try again
    didwesleep = sleep_on_conn_refused(out, err, retry_on_refusal, remote_host)
    # we slept, so call function again and try to execute
    if didwesleep:
      # run again, but this time decrement retry counter.
      # FIX: propagate the caller's connect_timeout on retries instead of
      # resetting it to default_connection_timeout.
      out, err, returncode = remote_download_file(remote_fn_path,
          local_fn_path, user, remote_host, retry_on_refusal - 1,
          connect_timeout = connect_timeout)

  # format the string
  out, err = deploy_logging.format_stdout_and_err(out, err)

  return out, err, returncode
def remote_download_dir(remote_source_dir, local_dest_dir, user, remote_host,
    retry_on_refusal = 3, connect_timeout = default_connection_timeout):
  """
  <Purpose>
    This uses scp to download a directory from a remote computer.

  <Arguments>
    remote_source_dir:
      The path to the directory to download (remote directory)
    local_dest_dir:
      Where do we put it on this computer?
    user:
      user to log in as
    remote_host:
      the ip/name of the machine we're connecting to.
    retry_on_refusal:
      Optional. Integer. Has number of times to retry the connection IF it
      was refused (built in to take care of not 'spamming' the remote server)
    connect_timeout:
      Optional. Integer. Time in seconds for ssh to timeout if no response
      was received.

  <Exceptions>
    Raises Exception if the parent of local_dest_dir does not exist.

  <Side Effects>
    None.

  <Returns>
    Tuple. (out, err, returncode)
      out: stdout from scp
      err: err from ssh
      returncode: scp's exit code
  """
  # the dir one level 'up' from the our destination dir must exist, so lets
  # grab it by doing some string math.. remove trailing / and then partition
  local_dest_dir_parent, junk, morejunk = local_dest_dir.strip('/').rpartition('/')

  # if our local destination directory does not exist then complain.
  if not os.path.isdir(local_dest_dir_parent):
    deploy_logging.logerror(local_dest_dir)
    deploy_logging.logerror(local_dest_dir_parent)
    deploy_logging.logerror('Problem with local directory: it does not exist!')
    raise Exception('Please check calling method.')

  # get the scp handle.  -r copies recursively; BatchMode=yes keeps scp from
  # prompting; StrictHostKeyChecking=no accepts unknown hosts.
  scp_proc_handle = subprocess.Popen('scp -r -o BatchMode=yes -o '+
      'ConnectTimeout='+str(connect_timeout)+' -o StrictHostKeyChecking=no '+\
      user+'@'+remote_host+':'+remote_source_dir+\
      ' '+local_dest_dir, shell = True,
      stdout = subprocess.PIPE, stderr = subprocess.PIPE)

  # the pid of the scp process just started
  scp_proc_pid = scp_proc_handle.pid

  # start thread to monitor timeouts (on another thread)
  deploy_threading.monitor_timeout(scp_proc_pid, int(connect_timeout),
      remote_host, user)

  # execute string and block this thread until done...
  out, err = scp_proc_handle.communicate('')
  returncode = scp_proc_handle.returncode

  # retry if conn. was refused?
  if retry_on_refusal:
    # check if we got a connection refused. if we did, could be cuz we're
    # spamming the server, so sleep and then try again
    didwesleep = sleep_on_conn_refused(out, err, retry_on_refusal, remote_host)
    # we slept, so call function again and try to execute
    if didwesleep:
      # run again, but this time decrement retry counter.
      # FIX: propagate the caller's connect_timeout on retries instead of
      # resetting it to default_connection_timeout.
      out, err, returncode = remote_download_dir(remote_source_dir,
          local_dest_dir, user, remote_host, retry_on_refusal - 1,
          connect_timeout = connect_timeout)

  # format the string
  out, err = deploy_logging.format_stdout_and_err(out, err)

  return out, err, returncode
def remote_shellexec(command_string, user, remote_host, retry_on_refusal = 3,
    connect_timeout = default_connection_timeout):
  """
  <Purpose>
    This uses ssh to execute the command_string on user@remote_host.

  <Arguments>
    command_string:
      the command string we'll execute on the remote machine. Commands are
      executed sequentially.
    user:
      user to log in as
    remote_host:
      the ip/name of the machine we're connecting to.
    retry_on_refusal:
      Optional. Integer. Has number of times to retry the connection IF it
      was refused (built in to take care of not 'spamming' the remote server)
    connect_timeout:
      Optional. String. Time in seconds for ssh to timeout if no response
      was received.

  <Exceptions>
    None.

  <Side Effects>
    None.

  <Returns>
    Tuple. (out, err, returncode)
      out: stdout from ssh
      err: err from ssh
      returncode: ssh's exit code
  """
  # Build the ssh invocation up front.
  # -T: don't allocate a tty (avoids a spurious ssh warning; we only pipe
  #     commands through stdin anyway).
  # BatchMode=yes: never prompt for a password.
  # StrictHostKeyChecking=no: connect even when the remote host is not yet
  #     cached in the known_hosts file.
  ssh_command = ('ssh -T -o BatchMode=yes -o ConnectTimeout='+\
      str(connect_timeout)+' -o StrictHostKeyChecking=no '\
      ' '+user+'@'+remote_host)

  # Launch the ssh session; command_string is fed in via stdin below.
  ssh_proc_handle = subprocess.Popen(ssh_command, shell=True,
      stdin=subprocess.PIPE, stdout=subprocess.PIPE, stderr=subprocess.PIPE)

  # Hand the PID to the watchdog so a hung connection gets killed after the
  # timeout elapses (runs on another thread).
  deploy_threading.monitor_timeout(ssh_proc_handle.pid,
      int(connect_timeout), remote_host, user)

  # Feed the commands to the remote shell and block until ssh exits.
  out, err = ssh_proc_handle.communicate(command_string)
  returncode = ssh_proc_handle.returncode

  # Retries left?  A refused connection may just mean we are hammering the
  # server, so back off and try once more with one fewer retry.
  if retry_on_refusal:
    if sleep_on_conn_refused(out, err, retry_on_refusal, remote_host):
      out, err, returncode = remote_shellexec(command_string, user,
          remote_host, retry_on_refusal - 1, connect_timeout)

  # Normalize stdout/stderr before handing them back.
  out, err = deploy_logging.format_stdout_and_err(out, err)

  return out, err, returncode
print "ERROR: Specified instructional machine filepath is"+\ " not a valid file ("+opt[1]+")" return else: print 'Invalid instructional machine path specified, not going to die.' return # print intro print_notification() # Execute the tar creation script out, err, returncode = shellexec2('python create_tar.py ' + custom_script_name) # Just formatting the out and err from executing the shell script. out, err = deploy_logging.format_stdout_and_err(out, err) # print if not empty if out: print out if err: print err print deploy_logging.sep # if all went sucessfully.. if returncode == 0: # setup all the directories.. prep_local_dirs(keep) print "Entering upload and execution script... (this may take a while)"