def PATCH_VM(name, patch=None):
    """Copy a patch script to the VM named *name* and execute it there.

    Args:
        name: VM name; used to derive the log file and the SSH redirect port.
        patch: file name under ./patches/ to sync and run. Defaults to
            sys.argv[3], preserving the original command-line behavior.
    """
    if patch is None:
        # Original code read the patch name straight from the CLI.
        patch = sys.argv[3]
    logfile = "./logs/%s.log" % name
    i = get_index(name)
    # SSH redirect ports for the VMs are allocated starting at 9900.
    redir_port = 9900 + i
    ssh.sync_folder(redir_port, "./patches/%s" % patch, "/tmp/", logfile)
    ssh.execute(redir_port, "chmod a+x /tmp/%s" % patch, logfile)
    ssh.execute(redir_port, "/tmp/%s" % patch, logfile)
def cleanup_log(hostname, username, password, port=22):
    """Archive and truncate ZStack logs on a remote host over SSH."""
    import ssh
    # Single remote shell pipeline: tar up rotated and current logs under a
    # timestamped name, then empty every *.log file in place.
    archive_and_truncate = '''cd /var/log/zstack; tar --ignore-failed-read -zcf zstack-logs-`date +%y%m%d-%H%M%S`.tgz *.log.* *.log; find . -name "*.log"|while read file; do echo "" > $file; done'''
    ssh.execute(archive_and_truncate, hostname, username, password, port=port)
def poke_puppet_agent(hostname, username, password, node_name, master_certname='zstack'):
    """Run a one-shot puppet agent on a remote host against *master_certname*."""
    with lock.FileLock(hostname):
        # Make the master certname resolve to the caller's IP on the target:
        # discover our IP from SSH_CLIENT, drop stale /etc/hosts entries, append.
        hosts_fixup = '''ip=`env | grep SSH_CLIENT | cut -d '=' -f 2 | cut -d ' ' -f 1`; [ $ip == ::1 ] && ip=127.0.0.1; sed -i "/%s/d" /etc/hosts; echo "$ip %s" >> /etc/hosts''' % (master_certname, master_certname)
        ssh.execute(hosts_fixup, hostname, username, password)
        agent_cmd = ('puppet agent --certname %s --no-daemonize --onetime '
                     '--waitforcert 60 --server %s --verbose --detailed-exitcodes'
                     % (node_name, master_certname))
        retcode, output, err = ssh.execute(agent_cmd, hostname, username, password,
                                           exception_if_error=False)
        # With --detailed-exitcodes, 1/4/6 indicate failures during the run.
        if retcode in (1, 4, 6):
            raise PuppetError('failed to run puppet agent:\nstdout:%s\nstderr:%s\n'
                              % (output, err))
        logger.debug(output)
def remote(command, halt_on_output=False):
    """Dispatch *command* to whichever remote command backend is configured."""
    # Log the command before running it.
    out.log(command, 'remote', out.LEVEL_VERBOSE)
    backend = engine.COMMAND_SYSTEM
    if backend == 'PHP':
        php.execute(command, halt_on_output)
    elif backend == 'SSH':
        ssh.execute(command)
    else:
        # Unknown backend: report the error and stop the engine.
        out.log("Error Unknown COMMAND_SYSTEM " + engine.COMMAND_SYSTEM,
                'remote', out.LEVEL_ERROR)
        engine.quit()
def PROVISION_VM(name):
    """Provision the VM *name*: set hostname, bring up eth1, run provision.cmd.

    Args:
        name: VM hostname; also keys the per-VM log file and provision script.
    """
    logfile = "./logs/%s.log" % name
    i = get_index(name)
    # SSH redirect ports for the VMs are allocated starting at 9900.
    redir_port = 9900 + i
    # Set hostname
    ssh.execute(redir_port, "hostname %s" % name, logfile)
    # ifconfig eth1
    ssh.execute(redir_port, "dhclient eth1", logfile)
    env_vars = {
        'hostname': name,
        'env': os.environ.get("env"),
        'consul_discovery_token': os.environ.get("consul_discovery_token"),
    }
    # Process the provision.cmd file
    fsync_list, lines = process_provision.process("./provision.cmd", env_vars)
    # fsync_list maps destination -> local source (matches the
    # sync_folder(port, local, remote, log) usage elsewhere in this file).
    for dest, src in fsync_list.items():
        ssh.sync_folder(redir_port, src, dest, logfile)
    # Write the provision script locally; 'with' guarantees the handle closes
    # even on error (the original open/close pair could leak it).
    script = "/tmp/%s.provision.sh" % name
    with open(script, "w") as f:
        f.writelines(lines)
    # Transfer the file to remote (same path on both sides).
    ssh.sync_folder(redir_port, script, "/tmp/", logfile)
    # Set execute permission and run
    ssh.execute(redir_port, "chmod a+x %s" % script, logfile)
    ssh.execute(redir_port, "bash -l %s" % script, logfile)
def provision_vm(name, i):
    """Provision VM *name* at index *i*: wait for boot, configure, run provision.cmd.

    Args:
        name: VM hostname; also keys the per-VM log file and provision script.
        i: VM index; the SSH redirect port is 9900 + i.
    """
    logfile = "./logs/%s.log" % name
    redir_port = 9900 + i
    print("Waiting for %s to boot" % name)
    wait_for_vm(i)
    # Set hostname
    ssh.execute(redir_port, "hostname %s" % name, logfile)
    # ifconfig eth1 (primary network must be eth1 always in qemu command)
    ssh.execute(redir_port, "dhclient eth1", logfile)
    env_vars = {
        'hostname': name,
        'env': os.environ.get("env"),
        'consul_discovery_token': os.environ.get("consul_discovery_token"),
    }
    # Process the provision.cmd file
    fsync_list, lines = process_provision.process("./provision.cmd", env_vars)
    # iteritems() replaced with items(): equivalent here and Python-3 safe.
    for dest, src in fsync_list.items():
        ssh.sync_folder(redir_port, src, dest, logfile)
    # Write the provision script locally; 'with' guarantees the handle closes
    # even if a write fails.
    script = "/tmp/%s.provision.sh" % name
    with open(script, "w") as f:
        f.writelines(lines)
    # Transfer the file to remote (same path on both sides).
    ssh.sync_folder(redir_port, script, "/tmp/", logfile)
    # Set execute permission and run
    ssh.execute(redir_port, "chmod a+x %s" % script, logfile)
    ssh.execute(redir_port, "bash -l %s" % script, logfile)
def get_guestip(name):
    """Return the eth1 IPv4 address of VM *name*, or None if not found.

    Runs `ifconfig eth1` on the guest with its output captured into a
    temporary log file, then scrapes the "inet addr:" line from that file.
    """
    i = get_index(name)
    redir_port = 9900 + i
    logfile = "/tmp/puppet-ipaddress-%d" % redir_port
    ssh.execute(redir_port, "ifconfig eth1", logfile)
    # BUG FIX: the original only closed the file on the match path, leaking
    # the handle when no address was found; 'with' closes it on every path.
    with open(logfile, "r") as f:
        for line in f:
            if "inet addr:" in line:
                # e.g. "inet addr:10.0.2.15  Bcast:..." -> capture up to " B"
                m = re.search(r"inet addr:(.*?) B", line)
                if m:
                    print("ip: %s" % m.group(1))
                    return m.group(1)
    return None
def execute_salt_state(hostname, username, password, state_name, master_name, machine_id=None):
    """Ensure a salt minion runs on *hostname*, then apply salt state *state_name*.

    Args:
        hostname, username, password: SSH credentials for the target machine.
        state_name: salt state to apply via the local `salt` CLI.
        master_name: hostname the minion should use to reach its master.
        machine_id: minion id; discovered from the DMI product UUID when None.

    Raises:
        SaltError: if no machine id can be determined or the state fails.
    """
    with lock.FileLock(hostname):
        # Make master_name resolve to the caller's IP on the target host.
        ssh.execute('''ip=`env | grep SSH_CLIENT | cut -d '=' -f 2 | cut -d ' ' -f 1`; [ $ip == ::1 ] && ip=127.0.0.1; sed -i "/%s/d" /etc/hosts; sed -i "/$ip/d" /etc/hosts; echo "$ip %s" >> /etc/hosts''' % (master_name, master_name), hostname, username, password)
        if not machine_id:
            # Use the DMI product UUID as a stable minion id.
            (retcode, machine_id, err) = ssh.execute('cat /sys/class/dmi/id/product_uuid', hostname, username, password, exception_if_error=False)
            if not machine_id:
                raise SaltError("Can't find machine-id on %s" % hostname)
            machine_id = machine_id.strip()
        if not wait_for_salt_minion_daemon(machine_id, 1, False):
            # Install the minion when absent, rewrite its id/master config and
            # restart it. BUG FIX: the sed scripts were "^id/d" / "^master/d",
            # which are invalid sed expressions (the address must be /^id/), so
            # stale id:/master: lines were never removed before appending.
            ssh.execute('which salt-minion; [ $? -ne 0 ] && curl -L http://bootstrap.saltstack.org | sudo sh ;sed -i "/^id/d" /etc/salt/minion; sed -i "/^master/d" /etc/salt/minion; echo "id: %s" >>/etc/salt/minion; echo "master: %s" >> /etc/salt/minion; rm -f /etc/salt/pki/minion/minion_master.pub ; service salt-minion restart' % (machine_id, master_name), hostname, username, password, exception_if_error=False)
            # Block until the freshly (re)started minion registers.
            wait_for_salt_minion_daemon(machine_id)
        print('salt %s %s' % (machine_id, state_name))
        output = shell.call('salt --out=json %s %s' % (machine_id, state_name))
        if not is_salt_failed(output):
            print('%s' % output)
            print("salt has deployed %s" % state_name)
        else:
            raise SaltError('salt execution failure: %s' % output)
def PROVISION_VM(name):
    """Provision VM *name*: bring up networking, register DNS, run provision.cmd.

    Args:
        name: VM hostname; special-cased for bootstrap1/haproxy1/cp*/gcp1.
    """
    logfile = "./logs/%s.log" % name
    i = get_index(name)
    redir_port = 9900 + i
    # MAC addresses are allocated sequentially from 00:11:22:33:44:55.
    mac_address = "00:11:22:33:44:" + str(55 + i)
    ssh.execute(redir_port, "dhclient eth1", logfile)
    print("Fixing dns server")
    add_dns_record(name, get_guestip(mac_address))
    if name == "bootstrap1":
        print("Fixing dns server for consul service")
        add_dns_record(
            "%s.service.consuldiscovery.linux2go.dk" % os.environ.get("consul_discovery_token"),
            get_guestip(mac_address))
    if name == "haproxy1":
        fix_cloud_dns(get_guestip(mac_address))
    # For separate data and control plane
    if name in ('cp1', 'cp2', 'gcp1'):
        ssh.execute(redir_port, "dhclient eth2", logfile)
    env_vars = {'hostname': "%s" % name}
    fsync_list, lines = process_provision.process("./provision.cmd", env_vars)
    for dest, src in fsync_list.items():
        ssh.sync_folder(redir_port, src, dest, logfile)
    # Write the provision script locally; 'with' guarantees the handle closes.
    script = "/tmp/%s.provision.sh" % name
    with open(script, "w") as f:
        f.writelines(lines)
    # Transfer the file to remote (same path on both sides).
    ssh.sync_folder(redir_port, script, "/tmp/", logfile)
    # BUG FIX: "sudo 'hostname %s'" quoted the whole command as one word, so
    # sudo looked for an executable literally named "hostname <name>". Drop
    # the quotes so hostname receives the name as its argument.
    ssh.execute(redir_port, "sudo hostname %s" % name, logfile)
    # Set execute permission and run
    ssh.execute(redir_port, "chmod a+x %s" % script, logfile)
    ssh.execute(redir_port, script, logfile)
def execute_salt_state(hostname, username, password, state_name, master_name, machine_id=None):
    """Ensure a salt minion runs on *hostname*, then apply salt state *state_name*.

    Args:
        hostname, username, password: SSH credentials for the target machine.
        state_name: salt state to apply via the local `salt` CLI.
        master_name: hostname the minion should use to reach its master.
        machine_id: minion id; discovered from the DMI product UUID when None.

    Raises:
        SaltError: if no machine id can be determined or the state fails.
    """
    with lock.FileLock(hostname):
        # Make master_name resolve to the caller's IP on the target host.
        ssh.execute(
            '''ip=`env | grep SSH_CLIENT | cut -d '=' -f 2 | cut -d ' ' -f 1`; [ $ip == ::1 ] && ip=127.0.0.1; sed -i "/%s/d" /etc/hosts; sed -i "/$ip/d" /etc/hosts; echo "$ip %s" >> /etc/hosts''' % (master_name, master_name),
            hostname, username, password)
        if not machine_id:
            # Use the DMI product UUID as a stable minion id.
            (retcode, machine_id, err) = ssh.execute('cat /sys/class/dmi/id/product_uuid',
                                                     hostname, username, password,
                                                     exception_if_error=False)
            if not machine_id:
                raise SaltError("Can't find machine-id on %s" % hostname)
            machine_id = machine_id.strip()
        if not wait_for_salt_minion_daemon(machine_id, 1, False):
            # Install the minion when absent, rewrite its id/master config and
            # restart it. BUG FIX: the sed scripts were "^id/d" / "^master/d",
            # which are invalid sed expressions (the address must be /^id/), so
            # stale id:/master: lines were never removed before appending.
            ssh.execute(
                'which salt-minion; [ $? -ne 0 ] && curl -L http://bootstrap.saltstack.org | sudo sh ;sed -i "/^id/d" /etc/salt/minion; sed -i "/^master/d" /etc/salt/minion; echo "id: %s" >>/etc/salt/minion; echo "master: %s" >> /etc/salt/minion; rm -f /etc/salt/pki/minion/minion_master.pub ; service salt-minion restart' % (machine_id, master_name),
                hostname, username, password, exception_if_error=False)
            # Block until the freshly (re)started minion registers.
            wait_for_salt_minion_daemon(machine_id)
        print('salt %s %s' % (machine_id, state_name))
        output = shell.call('salt --out=json %s %s' % (machine_id, state_name))
        if not is_salt_failed(output):
            print('%s' % output)
            print("salt has deployed %s" % state_name)
        else:
            raise SaltError('salt execution failure: %s' % output)
def poke_puppet_agent(hostname, username, password, node_name, master_certname='zstack'):
    """Trigger one puppet agent run on a remote host pointing at *master_certname*."""
    failure_codes = (1, 4, 6)  # --detailed-exitcodes values that mean failure
    with lock.FileLock(hostname):
        # Pin master_certname to the caller's IP in the target's /etc/hosts.
        ssh.execute(
            '''ip=`env | grep SSH_CLIENT | cut -d '=' -f 2 | cut -d ' ' -f 1`; [ $ip == ::1 ] && ip=127.0.0.1; sed -i "/%s/d" /etc/hosts; echo "$ip %s" >> /etc/hosts''' % (master_certname, master_certname),
            hostname, username, password)
        retcode, output, err = ssh.execute(
            'puppet agent --certname %s --no-daemonize --onetime --waitforcert 60 --server %s --verbose --detailed-exitcodes' % (node_name, master_certname),
            hostname, username, password, exception_if_error=False)
        if retcode in failure_codes:
            raise PuppetError(
                'failed to run puppet agent:\nstdout:%s\nstderr:%s\n' % (output, err))
        logger.debug(output)
def test_execute_zero_exit(self):
    """A command exiting 0 should fire the exited event with reason None."""
    captured = []

    def when_started(process):
        # Collect stdout chunks and wait for process exit.
        process.stdout.add_callback(captured.append)
        return process.exited.next_event()

    def when_exited(reason):
        self.assertIsNone(reason)

    d = ssh.execute("echo 'hello'",
                    test_credentials.ssh_hostname,
                    test_credentials.ssh_user,
                    private_key_files=[test_credentials.ssh_privatekey])
    d.addCallback(when_started)
    d.addCallback(when_exited)
    return d
def test_execute(self):
    """stdout of `echo 'hello'` should contain the string 'hello'."""
    captured = []

    def when_started(process):
        process.stdout.add_callback(captured.append)
        return process.exited.next_event()

    def when_exited(_):
        joined = "".join(bToStr(captured))
        self.assertTrue("hello" in joined,
                        msg="Expected to find 'hello' in output. Got: %s" % repr(bToStr(captured)))

    d = ssh.execute("echo 'hello'",
                    test_credentials.ssh_hostname,
                    test_credentials.ssh_user,
                    private_key_files=[test_credentials.ssh_privatekey])
    d.addCallback(when_started)
    d.addCallback(when_exited)
    return d
def test_execute_exitcode_42(self):
    """A non-zero exit code must fail the deferred with that exact code."""
    def when_started(process):
        return process.exited.next_event()

    def unexpected_success(_):
        self.fail("Expected failure due to non-zero exit code.")

    def check_failure(reason):
        self.assertEqual(42, reason.value.exitCode)

    d = ssh.execute("exit 42",
                    test_credentials.ssh_hostname,
                    test_credentials.ssh_user,
                    private_key_files=[test_credentials.ssh_privatekey])
    d.addCallback(when_started)
    d.addCallbacks(unexpected_success, check_failure)
    return d
def test_execute_stderr(self):
    """Output written to stderr should arrive via the stderr callback."""
    captured = []

    def when_started(process):
        process.stderr.add_callback(captured.append)
        return process.exited.next_event()

    def when_exited(_):
        # NOTE(review): unlike test_execute this joins raw chunks without
        # bToStr — presumably fine when chunks are str; verify for bytes.
        self.assertTrue("hello" in "".join(captured),
                        msg="Expected to find 'hello' in output. Got: %s" % repr(captured))

    d = ssh.execute("python -c \"import sys;sys.stderr.write('hello')\"",
                    test_credentials.ssh_hostname,
                    test_credentials.ssh_user,
                    private_key_files=[test_credentials.ssh_privatekey])
    d.addCallback(when_started)
    d.addCallback(when_exited)
    return d
def test_execute_kill(self):
    """Killing a running process should fail the exited event with ConnectionLost."""
    def when_started(process):
        # Grab the exit event first, then kill the long-running sleep.
        exit_event = process.exited.next_event()
        process.kill()
        return exit_event

    def unexpected_success(_):
        self.fail("Expected failure due to kill().")

    def check_failure(reason):
        self.assertTrue(reason.check(ConnectionLost),
                        "reason is not of type ConnectionLost")

    d = ssh.execute("python -c 'import time;time.sleep(10)'",
                    test_credentials.ssh_hostname,
                    test_credentials.ssh_user,
                    private_key_files=[test_credentials.ssh_privatekey])
    d.addCallback(when_started)
    d.addCallbacks(unexpected_success, check_failure)
    return d
def provision_vm(name, i):
    """Provision VM *name* at index *i*: wait for boot, configure, run provision.cmd.

    Args:
        name: VM hostname; also keys the per-VM log file and provision script.
        i: VM index; the SSH redirect port is 9900 + i.
    """
    logfile = "./logs/%s.log" % name
    redir_port = 9900 + i
    print("Waiting for %s to boot" % name)
    wait_for_vm(i)
    # Set hostname
    ssh.execute(redir_port, "hostname %s" % name, logfile)
    # ifconfig eth1 (primary network must be eth1 always in qemu command)
    ssh.execute(redir_port, "dhclient eth1", logfile)
    env_vars = {
        'hostname': name,
        'env': os.environ.get("env"),
        'consul_discovery_token': os.environ.get("consul_discovery_token"),
    }
    # Process the provision.cmd file
    fsync_list, lines = process_provision.process("./provision.cmd", env_vars)
    # iteritems() replaced with items(): equivalent here and Python-3 safe.
    for dest, src in fsync_list.items():
        ssh.sync_folder(redir_port, src, dest, logfile)
    # Write the provision script locally; 'with' guarantees the handle closes
    # even if a write fails.
    script = "/tmp/%s.provision.sh" % name
    with open(script, "w") as f:
        f.writelines(lines)
    # Transfer the file to remote (same path on both sides).
    ssh.sync_folder(redir_port, script, "/tmp/", logfile)
    # Set execute permission and run
    ssh.execute(redir_port, "chmod a+x %s" % script, logfile)
    ssh.execute(redir_port, "bash -l %s" % script, logfile)
def cleanup_log(hostname, username, password, port=22):
    """Compress current ZStack logs into a tarball and truncate them remotely."""
    import ssh
    # One remote shell pipeline: archive *.log and rotated *.log.* files into a
    # timestamped tarball, then zero out every *.log file.
    cmd = '''cd /var/log/zstack; tar --ignore-failed-read -zcf zstack-logs-`date +%y%m%d-%H%M%S`.tgz *.log.* *.log; find . -name "*.log"|while read file; do echo "" > $file; done'''
    ssh.execute(cmd, hostname, username, password, port=port)