def Upload_house():
    if request.method == 'POST':
        ## Step 1: load the uploaded file.
        f = request.files['file']
        ## Step 2: save it locally.
        f.save(secure_filename(f.filename))
        ## Step 3: connect to the service over SSH.
        ## SSHConnection is the class in ssh.py; its parameters are
        ## (host=str, port=int, username=str, pwd=str).
        ssh_2 = SSHConnection(dic_s['host'], dic_s['port'],
                              dic_s['username'], dic_s['password'])
        ssh_2.connect()
        ## ssh_2.upload(LOCAL FILE PATH, REMOTE PATH)
        ssh_2.upload(
            str(os.getcwd()) + '/' + str(f.filename),
            dic_s['remote work directory path'] + '/lgbm/train1.csv')
        ## Step 4: run data_v.py remotely to get the HousePrice data.
        global mydata  ## mydata holds the HousePrice visualization data
        mydata = eval(ssh_2.cmd('cd lgbm;python data_v.py'))
        return render_template('Housedata_description.html', mydata=mydata)
    else:
        return render_template('Upload_house.html')
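# The upload handlers in this collection all read their connection settings
# from a module-level `dic_s` dictionary that is never defined in these
# snippets. A minimal sketch of the shape the code appears to assume -- the
# key names are taken from the calls above, every value is a placeholder:
dic_s = {
    'host': '192.0.2.10',                   # SSH server address
    'port': 22,                             # SSH port (int)
    'username': 'deploy',
    'password': 'change-me',
    'remote work directory path': '/root',  # prefix for the remote upload paths
}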
def setup_ssh(self, host, username, password, port, destfile):
    """Open SSH connection"""
    print(host, username, password, port)  # debug print; note this echoes the password
    self.ssh_connection = SSHConnection(host, username, password, port)
    print("Log SSH:", self.ssh_connection)
    self.destfile = destfile
    self.ssh_open = True
# Python 2 snippet (raw_input, print statement, dict.iteritems).
class Thermon(object):
    def __init__(self, config):
        self.config = config
        self.connect()
        self.outputs = []
        self.add_output(CSVLogger)
        self.add_output(MatplotlibPlotter)

    def add_output(self, constructor):
        self.outputs.append(constructor(self.config))

    def connect(self):
        if 'host' in self.config['target']:
            host = self.config['target']['host']
        else:
            host = raw_input('Target host: ')
        if 'username' in self.config['target']:
            username = self.config['target']['username']
        else:
            username = raw_input('Target username: ')
        if 'password' in self.config['target']:
            password = self.config['target']['password']
        else:
            password = getpass()
        self.target = SSHConnection(host, username, password)

    def run(self):
        self.start_time = time.time()
        while True:
            try:
                for category in self.config['probes'].keys():
                    timestamp, data = self.poll_data(category)
                    for output in self.outputs:
                        output.write(category, timestamp, data)
                time.sleep(0.5)
            except KeyboardInterrupt:
                print 'Quitting'
                sys.exit(0)

    def poll_data(self, category):
        polltime = round(time.time() - self.start_time, 3)
        probedata = {}
        probes = self.config['probes'][category]['probes']
        for probe, path in probes.iteritems():
            data_raw = self.target.call('cat {}'.format(path))[0].strip()
            probedata[probe] = int(data_raw)
        return polltime, probedata
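# Thermon's connect()/poll_data() imply a two-part config: a 'target' section
# with optional SSH credentials (missing fields are prompted for interactively)
# and a 'probes' section mapping category names to probe-name -> remote-path
# dicts. A hypothetical minimal config; the structure is inferred from the
# class above and all values are placeholders:
config = {
    'target': {
        'host': '192.0.2.20',
        'username': 'pi',
        # 'password' omitted -> connect() falls back to getpass()
    },
    'probes': {
        'temperature': {
            'probes': {
                # probe name -> remote file read with `cat`; must parse as int
                'cpu': '/sys/class/thermal/thermal_zone0/temp',
            },
        },
    },
}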
def upload_s():
    if request.method == 'POST':
        f = request.files['file']
        f.save(secure_filename(f.filename))
        print(dic_s['password'])
        ssh_1 = SSHConnection(dic_s['host'], dic_s['port'],
                              dic_s['username'], dic_s['password'])
        ssh_1.connect()
        # INPUT YOUR PATH
        ssh_1.upload(str(os.getcwd()) + '\\' + str(f.filename),
                     dic_s['remote work directory path'] + '/FlaskIndexPrediction/data/data.csv')
        return render_template('index_s.html')
    else:
        return render_template('upload_s.html')
def upload_h():
    if request.method == 'POST':
        f = request.files['file']
        f.save(secure_filename(f.filename))
        print(f.filename)
        global ssh
        ssh = SSHConnection(dic_s['host'], dic_s['port'],
                            dic_s['username'], dic_s['password'])
        ssh.connect()
        ssh.upload(str(os.getcwd()) + '\\' + str(f.filename),
                   dic_s['remote work directory path'] + '/lgbm/train1.csv')
        global mydata
        mydata = eval(ssh.cmd('cd py2;source bin/activate;cd ..;cd lgbm;python data_v.py'))
        return render_template('index_h.html', mydata=mydata)
    else:
        return render_template('upload_h.html')
def connection(self, user_name=None, password=None, private_key=None):
    """
    Connect to the target hosts over the SSH protocol.
    :param user_name:
    :param password:
    :param private_key:
    :return: dict mapping host -> SSHConnection
    """
    hosts = set()
    # Collect the unique host names from both host lists. (A plain loop
    # replaces the original map() calls, whose laziness under Python 3
    # would skip the hosts.add side effect entirely.)
    for _host in self.config["ps_hosts"]:
        hosts.add(parse_host_port(_host[0])[0])
    for _host in self.config["worker_hosts"]:
        hosts.add(parse_host_port(_host[0])[0])
    _ssh_clients = {}
    for host in hosts:
        logger.debug("ssh connect to host[%s]" % host)
        _ssh_clients[host] = SSHConnection(host, username=user_name,
                                           password=password,
                                           private_key=private_key)
    return _ssh_clients
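# parse_host_port() is called above but not shown. A minimal sketch consistent
# with the call sites (it must return a tuple whose first element is the bare
# host name); the default port and the lack of error handling are assumptions:
def parse_host_port(address, default_port=22):
    """Hypothetical helper: split 'host:port' into (host, port)."""
    host, _, port = address.partition(':')
    return host, int(port) if port else default_port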
def Upload_stock():
    if request.method == 'POST':
        ## Step 1: load the uploaded file.
        f = request.files['file']
        ## Step 2: save it locally.
        f.save(secure_filename(f.filename))
        ## Step 3: connect to the service over SSH.
        ## SSHConnection is the class in ssh.py; its parameters are
        ## (host=str, port=int, username=str, pwd=str).
        ssh_1 = SSHConnection(dic_s['host'], dic_s['port'],
                              dic_s['username'], dic_s['password'])
        ssh_1.connect()
        ## ssh_1.upload(LOCAL FILE PATH, REMOTE PATH)
        ssh_1.upload(
            str(os.getcwd()) + '/' + str(f.filename),
            dic_s['remote work directory path'] + '/FlaskIndexPrediction/data/data.csv')  # INPUT YOUR PATH
        return render_template('Choose_LSTM.html')
    else:
        return render_template('Upload_stock.html')
class Machine:
    def __init__(
        self,
        _id: str,
        address: str,
        username: str,
        ssh_password: str,
        jobs_db,
        skip_gpus: Sequence[int] = (),
        gpu_runner_on: bool = False,
        app=None,
    ):
        self._id = _id
        self.address = address
        self.username = username
        self.jobs_db = jobs_db
        self.skip_gpus = skip_gpus
        self.gpu_runner_on = gpu_runner_on
        self.app = app
        self.new_processes = []
        self._client = SSHConnection(self.address, self.username, ssh_password, auto_add_host=True)
        self._client_lock = Lock()

    def dashboard_data(self) -> Dict[str, Any]:
        return {
            "_id": self._id,
            "address": self.address,
            "username": self.username,
            "gpu_runner_on": self.gpu_runner_on,
        }

    def execute(self, command: str, codec: str = "utf-8") -> str:
        """
        Runs `command` using the SSHConnection for this Machine and returns stdout

        :param command: *single-line* command to run
        :param codec: codec to use to decode the standard output from running `command`
        :returns: decoded stdout
        """
        try:
            with self._client_lock:
                return self._client.execute(command, codec)
        except:
            if self.app:
                self.app.logger.info(traceback.format_exc())
            raise

    def start(self, sleep_time: int = 30):
        def handle_machine(machine: Machine, sleep_time: int):
            while True:
                if machine.gpu_runner_on:
                    if self.app:
                        self.app.logger.info(f"Checking for jobs on {self.address}.")
                    machine.start_jobs()
                sleep(sleep_time)

        thread = Thread(target=lambda: handle_machine(self, sleep_time), daemon=True)
        thread.start()

    def start_jobs(self, n_passes: int = 2, keep_time: int = 60) -> None:
        """
        :param n_passes: number of times to query the state of the GPUs; the utilization
          currently used on each GPU is assumed to be the mean across these passes, and
          the memory used is the max
        :param keep_time: how long to keep a started process in the new_processes list
          before removing it (a process is removed immediately if we see it running).
          Leaving a process in the new_processes list means that we assume that the
          resources it has requested are already "reserved" even though we don't see them
          being used yet on the GPU. Removing a process from this list lets other
          processes be started with those resources. (`keep_time` is in seconds)
        """
        while True:  # place jobs for this machine until you can't place any more
            job = self.jobs_db.find_one({"machine": self._id}, sort=[("util", 1)])
            if not job:  # no more queued jobs for this machine
                break

            # check if there's a gpu you can run this job on (enough memory and util free)
            gpus = {}
            new_processes = self.new_processes
            for _ in range(n_passes):
                gpu_info = get_gpus_from_info_string(self.execute(_smi_command))
                for gpu in gpu_info:
                    try:
                        gpus[gpu.idx].append(gpu)
                    except KeyError:
                        gpus[gpu.idx] = [gpu]

            # TODO - remove processes that have shown up on the GPU
            # if a process doesn't show up on the GPU after enough time, assume it had
            # an error and crashed; remove
            now = time()
            new_processes = [
                process for process in new_processes if now - process.timestamp < keep_time
            ]

            # subtract mem and util used by new processes from that which is shown to be free
            mem_newly_used = {gpu_num: 0 for gpu_num in gpus}
            util_newly_used = {gpu_num: 0 for gpu_num in gpus}
            for process in new_processes:
                mem_newly_used[process.gpu_num] += process.mem_needed
                util_newly_used[process.gpu_num] += process.util_needed

            # set mem_used to max from each pass, util_used to mean
            gpus = [
                _GPU(
                    idx=num,
                    mem_used=max([gpu.mem_used for gpu in gpu_list]) + mem_newly_used[num],
                    mem_total=gpu_list[0].mem_total,
                    util_used=sum([gpu.util_used for gpu in gpu_list]) / n_passes
                    - util_newly_used[num],
                )
                for (num, gpu_list) in gpus.items()
            ]

            gpus = [
                gpu for gpu in gpus
                if gpu.mem_free >= job["mem"] and gpu.util_free >= job["util"]
            ]
            try:
                best_gpu = max(gpus, key=lambda gpu: gpu.util_free)
            except ValueError:  # max gets no gpus because none have enough mem_free and util_free
                if self.app:
                    self.app.logger.info(f"No free GPUs to start jobs on {self.address}!")
                break  # can't place anything on this machine

            job_cmd = job["cmd"].format(best_gpu.idx)
            if self.app:
                self.app.logger.info(f"Starting job: {job_cmd} ({self._id})")

            # make sure to background the script
            # surrounding w/ () executes in a subshell so "Done ..." isn't printed
            # when the job finishes
            output = self.execute(f"({job_cmd} >> ~/.gpu_log 2>&1 &)")
            new_processes.append(
                _Process(
                    job_cmd,
                    best_gpu.idx,
                    mem_needed=job["mem"],
                    util_needed=job["util"],
                    timestamp=time(),
                )
            )
            self.new_processes = new_processes
            # this job is running, so remove it from the list
            self.jobs_db.remove({"_id": job["_id"]})
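# _GPU and _Process are consumed by start_jobs() but never defined in this
# excerpt. A hypothetical reconstruction consistent with the attribute accesses
# above (mem_free, util_free, gpu_num, timestamp, ...); the field types and the
# 0-100 utilization scale are assumptions:
from typing import NamedTuple


class _GPU(NamedTuple):
    idx: int
    mem_used: float
    mem_total: float
    util_used: float

    @property
    def mem_free(self) -> float:
        return self.mem_total - self.mem_used

    @property
    def util_free(self) -> float:
        # assumes utilization is reported as a 0-100 percentage
        return 100.0 - self.util_used


class _Process(NamedTuple):
    cmd: str
    gpu_num: int
    mem_needed: float
    util_needed: float
    timestamp: float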
def upload_p():
    def get(name):
        return request.values.get(name)

    if request.method == 'POST':
        dic = {}
        dic['e'] = int(get('e'))
        dic['lb'] = int(get('lb'))
        dic['lr'] = float(get('lr'))
        dic['tp'] = float(get('tp'))
        ssh_s = SSHConnection(dic_s['host'], dic_s['port'],
                              dic_s['username'], dic_s['password'])
        ssh_s.connect()
        # a space is needed before {tp}; argparse would not recognize '-tp0.8'
        s = 'cd py2;source bin/activate;cd ..;cd FlaskIndexPrediction;python main.py -e {e} -lb {lb} -lr {lr} -tp {tp}'
        ssh_s.cmd(s.format(e=dic['e'], lb=dic['lb'], lr=dic['lr'], tp=dic['tp']))
        ssh_s.download(dic_s['remote work directory path'] + '/FlaskIndexPrediction/output/outputTest.csv',
                       str(os.getcwd()) + '\\' + 'outputTest.csv')
        ssh_s.download(dic_s['remote work directory path'] + '/FlaskIndexPrediction/output/outputTrain.csv',
                       str(os.getcwd()) + '\\' + 'outputTrain.csv')
        ssh_s.download(dic_s['remote work directory path'] + '/FlaskIndexPrediction/output/outputTrainTest.csv',
                       str(os.getcwd()) + '\\' + 'outputTrainTest.csv')
        outputTest = pd.read_csv('outputTest.csv')
        outputTrain = pd.read_csv('outputTrain.csv')
        outputTrainTest = pd.read_csv('outputTrainTest.csv')
        Test_time = outputTest['Unnamed: 0']
        Test_origin = outputTest['origin']
        Test_predict = outputTest['predict']
        Train_time = outputTrain['Unnamed: 0']
        Train_origin = outputTrain['origin']
        Train_predict = outputTrain['predict']
        TrainTest_time = outputTrainTest['Unnamed: 0']
        TrainTest_origin = outputTrainTest['origin']
        TrainTest_predict = outputTrainTest['predict']
        stock_data = {'Test_time': Test_time, 'Test_origin': Test_origin,
                      'Test_predict': Test_predict, 'Train_time': Train_time,
                      'Train_origin': Train_origin, 'Train_predict': Train_predict,
                      'TrainTest_time': TrainTest_time, 'TrainTest_origin': TrainTest_origin,
                      'TrainTest_predict': TrainTest_predict}
        return render_template('ZhangYD.html', stock_data=stock_data)
    else:
        return render_template('index_p.html')
def Stockdata_parameter():
    def get(name):
        ## Get a value from the HTML form; 'name' is the id of the HTML <input>.
        return request.values.get(name)

    if request.method == 'POST':
        ## Step 1: get the stock-data parameters from the HTML form.
        stock_parameter = {}  ## the parameters the user entered in the form
        stock_parameter['e'] = int(get('e'))
        stock_parameter['lb'] = int(get('lb'))
        stock_parameter['lr'] = float(get('lr'))
        stock_parameter['tp'] = float(get('tp'))
        ## Step 2: run the LSTM module on the server.
        ssh_s = SSHConnection(dic_s['host'], dic_s['port'],
                              dic_s['username'], dic_s['password'])
        ssh_s.connect()
        ## A space is needed before {tp}; argparse would not recognize '-tp0.8'.
        s = 'cd FlaskIndexPrediction;python main.py -e {e} -lb {lb} -lr {lr} -tp {tp}'
        ssh_s.cmd(s.format(e=stock_parameter['e'], lb=stock_parameter['lb'],
                           lr=stock_parameter['lr'], tp=stock_parameter['tp']))
        ## Step 3: download the main.py output files to the local machine.
        ssh_s.download(dic_s['remote work directory path'] + '/FlaskIndexPrediction/output/outputTest.csv',
                       str(os.getcwd()) + '/module/YD/' + 'outputTest.csv')
        ssh_s.download(dic_s['remote work directory path'] + '/FlaskIndexPrediction/output/outputTrain.csv',
                       str(os.getcwd()) + '/module/YD/' + 'outputTrain.csv')
        ssh_s.download(dic_s['remote work directory path'] + '/FlaskIndexPrediction/output/outputTrainTest.csv',
                       str(os.getcwd()) + '/module/YD/' + 'outputTrainTest.csv')
        ## Step 4: read the CSVs from the local copies.
        outputTest = pd.read_csv('module/YD/outputTest.csv')
        outputTrain = pd.read_csv('module/YD/outputTrain.csv')
        outputTrainTest = pd.read_csv('module/YD/outputTrainTest.csv')
        Test_time = outputTest['Unnamed: 0']
        Test_origin = outputTest['origin']
        Test_predict = outputTest['predict']
        Train_time = outputTrain['Unnamed: 0']
        Train_origin = outputTrain['origin']
        Train_predict = outputTrain['predict']
        TrainTest_time = outputTrainTest['Unnamed: 0']
        TrainTest_origin = outputTrainTest['origin']
        TrainTest_predict = outputTrainTest['predict']
        stock_data = {
            'Test_time': Test_time, 'Test_origin': Test_origin, 'Test_predict': Test_predict,
            'Train_time': Train_time, 'Train_origin': Train_origin, 'Train_predict': Train_predict,
            'TrainTest_time': TrainTest_time, 'TrainTest_origin': TrainTest_origin,
            'TrainTest_predict': TrainTest_predict
        }
        return render_template('Stock_final_output.html', stock_data=stock_data)
    else:
        return render_template('Stockdata_parameter.html')
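# For concreteness, formatting the command template with hypothetical values
# (the flags plausibly stand for epochs, look-back window, learning rate, and
# train proportion, but that is a guess from the names):
s = 'cd FlaskIndexPrediction;python main.py -e {e} -lb {lb} -lr {lr} -tp {tp}'
print(s.format(e=100, lb=5, lr=0.001, tp=0.8))
# -> cd FlaskIndexPrediction;python main.py -e 100 -lb 5 -lr 0.001 -tp 0.8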
def Housedata_parameter():
    def get(name):
        ## Get a value from the HTML form; 'name' is the id of the HTML <input>.
        return request.values.get(name)

    if request.method == 'POST':
        ## Step 1: get the house-data parameters from the HTML form.
        LotArea = get("LotArea")
        Neighborhood = get("Neighborhood")
        YearBuilt = get("YearBuilt")
        GrLivArea = get("GrLivArea")
        Street = get("Street")
        Utilities = get("Utilities")
        LotConfig = get("LotConfig")
        HouseStyle = get("HouseStyle")
        RoofStyle = get("RoofStyle")
        SaleType = get('SaleType')
        SaleCondition = get('SaleCondition')
        lgbm_data = [LotArea, Street, Utilities, LotConfig, Neighborhood, HouseStyle,
                     YearBuilt, RoofStyle, GrLivArea, SaleType, SaleCondition]
        lgbm_data_1 = ",".join(lgbm_data)

        ## Step 2: data processing.
        def getSortedValues(row):
            ## Return the values of 'row' ordered by key. Note the sort is
            ## lexicographic, so 'Column10' sorts before 'Column2'; the same
            ## order is applied to both the header and the data row.
            sortedValues = []
            keys = row.keys()
            keys.sort()  # Python 2; under Python 3 use sorted(row)
            for key in keys:
                sortedValues.append(row[key])
            return sortedValues

        rows = [{
            'Column1': 1, 'Column2': LotArea, 'Column3': Street,
            'Column4': Utilities, 'Column5': LotConfig, 'Column6': Neighborhood,
            'Column7': HouseStyle, 'Column8': YearBuilt, 'Column9': RoofStyle,
            'Column10': GrLivArea, 'Column11': SaleType, 'Column12': SaleCondition,
        }]
        names = {
            'Column1': 'Id', 'Column2': 'LotArea', 'Column3': 'Street',
            'Column4': 'Utilities', 'Column5': 'LotConfig', 'Column6': 'Neighborhood',
            'Column7': 'HouseStyle', 'Column8': 'YearBuilt', 'Column9': 'RoofStyle',
            'Column10': 'GrLivArea', 'Column11': 'SaleType', 'Column12': 'SaleCondition',
        }

        ## Step 3: write the sorted data to a CSV file ('test.csv').
        fileobj = open("module/YX/test.csv", 'wb')
        fileobj.write('\xEF\xBB\xBF')  # UTF-8 BOM (Python 2 byte string)
        writer = csv.writer(fileobj)
        sortedValues = getSortedValues(names)
        writer.writerow(sortedValues)
        for row in rows:
            sortedValues = getSortedValues(row)
            print(sortedValues)
            writer.writerow(sortedValues)
        fileobj.close()

        ## Step 4: upload test.csv.
        ssh_3 = SSHConnection(dic_s['host'], dic_s['port'],
                              dic_s['username'], dic_s['password'])
        ssh_3.connect()
        ssh_3.upload(str(os.getcwd()) + '/module/YX/' + 'test.csv', '/root/lgbm/test.csv')

        ## Step 5: run the LGBM module to process the data.
        show = ssh_3.cmd('cd lgbm;python lgbm.py')
        show_l = show.split('\n')  ## the terminal output of lgbm.py, line by line
        pred_y = eval(show_l[-2])  ## the predicted price, as a list
        pred_y_show = pred_y[0]  ## the predicted price, as a float
        quantitative = re.findall(r"\d+", show_l[3])[0]  ## number of quantitative features
        qualitative = re.findall(r"\d+", show_l[3])[1]  ## number of qualitative features
        train_x1 = show_l[6]  ## the shape of train_X, e.g. (1018, 11)
        train_y1 = show_l[7]  ## the shape of train_y, e.g. (1018,)
        test_x1 = show_l[8]  ## the shape of test_x, e.g. (1, 11)
        return render_template('House_final_output.html',
                               pred_y=round(pred_y_show, 2),
                               train_x1=train_x1, train_y1=train_y1, test_x1=test_x1,
                               quantitative=quantitative, qualitative=qualitative)
    else:
        return render_template('Housedata_parameter.html', mydata=mydata)
class PostgresManager(object):
    """Manage a PostgreSQL node, locally or over SSH."""

    def __init__(self, node):
        super(PostgresManager, self).__init__()
        self.node = node
        if not node['local'] == 'true':
            self.connection = SSHConnection(
                node['hostname'], node['username'], node['password'])

    def run(self, cmd):
        if self.node['local'] == 'true':
            return local.shell(cmd)
        else:
            return self.connection.execute(cmd)

    def start(self):
        self.run('sudo /etc/init.d/postgresql start')

    def stop(self):
        self.run('sudo /etc/init.d/postgresql stop')

    def restart(self):
        self.run('sudo /etc/init.d/postgresql restart')

    def reload(self):
        self.run('sudo /etc/init.d/postgresql reload')

    def write_file(self, path, data):
        if self.node['local'] == 'true':
            local.write_file(path, data)
        else:
            self.connection.upload_data(data, path)

    def read_file(self, path):
        if self.node['local'] == 'true':
            return local.read_file(path)
        else:
            return self.connection.download(path)

    def init(self, master):
        environ = os.environ.copy()
        environ['PATH'] = '/usr/lib/pgclust:' + environ['PATH']
        self.stop()
        # Recreate the cluster and install the config files.
        self.run('sudo pg_dropcluster %(pgversion)s %(cluster)s' % self.node)
        self.run('sudo pg_createcluster %(pgversion)s %(cluster)s' % self.node)
        self.run('sudo chown -R %(pguser)s:%(pguser)s /var/lib/postgresql/%(pgversion)s/%(cluster)s' % self.node)
        self.write_file('/tmp/pgpostgresql.conf', template.PG_CONFIG_TEMPLATE % self.node)
        self.write_file('/tmp/pgpg_hba.conf', template.PG_HBA_CONFIG_TEMPLATE % self.node)
        self.write_file('/tmp/pgrepmgr.conf', template.REPMGR_CONFIG_TEMPLATE % self.node)
        self.run('sudo cp /tmp/pgpostgresql.conf /etc/postgresql/%(pgversion)s/%(cluster)s/postgresql.conf' % self.node)
        self.run('sudo cp /tmp/pgpg_hba.conf /etc/postgresql/%(pgversion)s/%(cluster)s/pg_hba.conf' % self.node)
        self.run('sudo cp /tmp/pgrepmgr.conf /etc/postgresql/%(pgversion)s/%(cluster)s/repmgr.conf' % self.node)
        self.run('sudo chmod 644 /etc/postgresql/%(pgversion)s/%(cluster)s/*' % self.node)
        self.run('sudo rm /tmp/pgpostgresql.conf /tmp/pgpg_hba.conf /tmp/pgrepmgr.conf')
        self.run('sudo chown %(pguser)s:%(pguser)s -R /etc/postgresql' % self.node)
        # Install the SSH key pair and config for the postgres user.
        self.write_file('/tmp/sshid_rsa', local.read_file(self.node['privkey']))
        self.write_file('/tmp/sshid_rsa.pub', local.read_file(self.node['pubkey']))
        self.write_file('/tmp/sshconfig', template.SSH_CONFIG)
        self.run('sudo -u %(pguser)s mkdir -p ~%(pguser)s/.ssh' % self.node)
        self.run('sudo cp /tmp/sshid_rsa ~%(pguser)s/.ssh/id_rsa' % self.node)
        self.run('sudo cp /tmp/sshid_rsa.pub ~%(pguser)s/.ssh/id_rsa.pub' % self.node)
        self.run('sudo cp /tmp/sshconfig ~%(pguser)s/.ssh/config' % self.node)
        self.run('sudo rm /tmp/sshconfig /tmp/sshid_rsa /tmp/sshid_rsa.pub')
        self.run('sudo chown %(pguser)s:%(pguser)s ~%(pguser)s/.ssh/config' % self.node)
        self.run('sudo chown %(pguser)s:%(pguser)s ~%(pguser)s/.ssh/id_rsa' % self.node)
        self.run('sudo chown %(pguser)s:%(pguser)s ~%(pguser)s/.ssh/id_rsa.pub' % self.node)
        self.run('sudo chmod 644 ~%(pguser)s/.ssh/config' % self.node)
        self.run('sudo chmod 600 ~%(pguser)s/.ssh/id_rsa' % self.node)
        self.run('sudo chmod 644 ~%(pguser)s/.ssh/id_rsa.pub' % self.node)
        if self.node['type'] == 'master':
            # Register this node as the repmgr master.
            self.start()
            self.run('sudo -u %(pguser)s createuser --login --superuser repmgr' % self.node)
            self.run('sudo -u %(pguser)s createdb repmgr' % self.node)
            self.run('sudo -u %(pguser)s repmgr --verbose -f /etc/postgresql/%(pgversion)s/%(cluster)s/repmgr.conf master register' % self.node)
        else:
            # Clone the master and register this node as a standby.
            self.run('sudo -u %(pguser)s rm -rf /var/lib/postgresql/%(pgversion)s/%(cluster)s' % self.node)
            self.run('sudo -u %(pguser)s mkdir /var/lib/postgresql/%(pgversion)s/%(cluster)s' % self.node)
            self.run('sudo -u %(pguser)s chmod 700 /var/lib/postgresql/%(pgversion)s/%(cluster)s' % self.node)
            self.run(('sudo -u %(pguser)s PATH="' + environ['PATH'] + '" repmgr --verbose --force -D /var/lib/postgresql/%(pgversion)s/%(cluster)s -d repmgr -p 5432 -U repmgr -R %(pguser)s standby clone ') % self.node + master['hostname'])
            self.start()
            self.run('sudo -u %(pguser)s repmgr -f /etc/postgresql/%(pgversion)s/%(cluster)s/repmgr.conf --verbose standby register' % self.node)

    def update_nodes(self, nodes):
        records = []
        for node in nodes:
            node['hostname'] = socket.gethostbyname(node['hostname']) + '/32'
            records.append(template.REPLICATION_NODE_TEMPLATE % node)
        pg_hba = '\n\n'.join(records) + template.PG_HBA_CONFIG_TEMPLATE % self.node
        self.write_file('/tmp/pgpg_hba.conf', pg_hba)
        self.run('sudo cp /tmp/pgpg_hba.conf /etc/postgresql/%(pgversion)s/%(cluster)s/pg_hba.conf' % self.node)
        self.run('sudo rm /tmp/pgpg_hba.conf')

    def update_keys(self, nodes):
        self.run('sudo -u %(pguser)s mkdir -p ~%(pguser)s/.ssh' % self.node)
        self.run('sudo chmod 700 ~%(pguser)s/.ssh' % self.node)
        keys = []
        for node in nodes:
            key = local.read_file(node['pubkey'])
            keys.append(key)
        self.write_file('/tmp/sshauthorized_keys', '\n'.join(keys))
        self.run('sudo cp /tmp/sshauthorized_keys ~%(pguser)s/.ssh/authorized_keys' % self.node)
        self.write_file('/tmp/sshid_rsa', local.read_file(self.node['privkey']))
        self.write_file('/tmp/sshid_rsa.pub', local.read_file(self.node['pubkey']))
        self.run('sudo cp /tmp/sshid_rsa ~%(pguser)s/.ssh/id_rsa' % self.node)
        self.run('sudo cp /tmp/sshid_rsa.pub ~%(pguser)s/.ssh/id_rsa.pub' % self.node)
        self.run('sudo rm /tmp/sshauthorized_keys /tmp/sshid_rsa /tmp/sshid_rsa.pub')
        self.run('sudo chown %(pguser)s:%(pguser)s ~%(pguser)s/.ssh/authorized_keys' % self.node)
        self.run('sudo chown %(pguser)s:%(pguser)s ~%(pguser)s/.ssh/id_rsa' % self.node)
        self.run('sudo chown %(pguser)s:%(pguser)s ~%(pguser)s/.ssh/id_rsa.pub' % self.node)
        self.run('sudo chmod 600 ~%(pguser)s/.ssh/authorized_keys' % self.node)
        self.run('sudo chmod 600 ~%(pguser)s/.ssh/id_rsa' % self.node)
        self.run('sudo chmod 644 ~%(pguser)s/.ssh/id_rsa.pub' % self.node)

    def update(self, nodes):
        self.update_nodes(nodes.values())
        self.update_keys(nodes.values())
        self.reload()
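# The `node` dictionary that drives PostgresManager is not defined anywhere in
# this excerpt. A hypothetical example consistent with the keys referenced
# above; all values are placeholders:
node = {
    'local': 'false',                    # compared against the string 'true'
    'hostname': 'db1.example.com',
    'username': 'admin',
    'password': 'change-me',
    'pgversion': '9.1',                  # /etc/postgresql/<pgversion>/<cluster>/...
    'cluster': 'main',
    'pguser': 'postgres',
    'privkey': '/etc/pgclust/id_rsa',    # local paths read via local.read_file()
    'pubkey': '/etc/pgclust/id_rsa.pub',
    'type': 'master',                    # 'master', or any other value for a standby
}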