def run(self, params, args):
    nodes = self.newdb.getNodesfromNames(args, managed_only=1,
        preload=['vm_defs'])

    if len(nodes) < 1:
        self.abort('must supply at least one host')

    threads = []
    for node in nodes:
        #
        # the name of the physical host that will boot
        # this VM host
        #
        cmd = '/opt/rocks/bin/rocks report host vlan '
        cmd += '%s | ' % node.name

        if node.vm_defs and node.vm_defs.physNode:
            cmd += self.getExecCommand(node.vm_defs.physNode.name)

            # this is called only on a single host, so there is
            # no need to use Parallel
            os.system(cmd)
        else:
            cmd += '/opt/rocks/bin/rocks report script |'
            cmd += self.getExecCommand(node.name)

            p = Parallel(cmd, node.name)
            threads.append(p)
            p.start()

    for thread in threads:
        thread.join(timeout)

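# All of the run() methods in this file fan commands out with
# Parallel(cmd, host) threads and later join them with a module-level
# 'timeout'. Below is a minimal sketch of what such a wrapper could look
# like, assuming Parallel is simply a threading.Thread subclass that
# shells out via subprocess; the class name and constructor signature
# mirror the calls in these methods, but the body and the timeout value
# are assumptions.
import subprocess
import threading

timeout = 30    # assumed join timeout, in seconds

class Parallel(threading.Thread):
    """Run one shell command for one host in its own thread (sketch)."""

    def __init__(self, cmd, host):
        threading.Thread.__init__(self)
        self.cmd = cmd      # full shell pipeline to execute
        self.host = host    # kept only so callers can report per-host status
        self.rc = None      # exit status, filled in by run()

    def run(self):
        # shell=True because the callers build pipelines ('... | ssh ... bash')
        self.rc = subprocess.call(self.cmd, shell=True)
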
def run(self, params, args):
    # Get hostnames from args
    hosts = self.getHostnames(args)

    fname = '/etc/411-security/shared.key'

    # create a known hosts temporary file
    # this is so we don't contaminate the regular known hosts file
    # since this sync might change the host keys.
    (khfid, khfname) = tempfile.mkstemp()

    # Copy the 411 shared key to all nodes
    threads = []
    for host in hosts:
        cmd = 'scp -q -o UserKnownHostsFile=%s %s root@%s:%s' % \
            (khfname, fname, host, fname)

        p = Parallel(cmd, host)
        p.start()
        threads.append(p)

    for thread in threads:
        thread.join(timeout)

    if os.path.exists(khfname):
        os.unlink(khfname)

def run(self, params, args):
    hosts = self.getHostnames(args, managed_only=1)
    localhost = self.getHostnames(["localhost"])[0]

    threads = []
    for host in hosts:
        #
        # get the attributes for the host
        #
        attrs = self.db.getHostAttrs(host)
        attrs = rocks.util.escapeStringForShell(str(attrs))

        cmd = '/opt/rocks/bin/rocks report host firewall '
        cmd += '%s | ' % host
        cmd += '/opt/rocks/bin/rocks report script '
        cmd += 'attrs="%s" | ' % attrs
        cmd += self.getExecCommand(host, localhost)

        p = Parallel(cmd, host)
        threads.append(p)
        p.start()

    #
    # collect the threads
    #
    for thread in threads:
        thread.join(timeout)

    threads = []
    for host in hosts:
        if rocks.version_major == '6':
            cmd = 'echo "/sbin/service iptables restart > /dev/null 2>&1" |'
        else:
            cmd = 'echo "/usr/bin/systemctl reload iptables > /dev/null 2>&1" |'
        cmd += self.getExecCommand(host, localhost)

        p = Parallel(cmd, host)
        threads.append(p)
        p.start()

    #
    # collect the threads
    #
    for thread in threads:
        thread.join(timeout)

    self.runPlugins(hosts)

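# The attrs dictionary above is interpolated into a double-quoted shell
# word ('attrs="%s"'), so quotes or backticks inside attribute values could
# break, or even be evaluated by, the remote shell; that is what the
# rocks.util.escapeStringForShell() call guards against. Below is a
# hypothetical stand-in, assuming single-quote wrapping is an acceptable
# escaping strategy (the real helper may behave differently).
def escape_string_for_shell(s):
    # wrap in single quotes and escape embedded single quotes so the
    # string survives exactly one level of shell evaluation
    return "'" + s.replace("'", "'\\''") + "'"

# example use: attrs = escape_string_for_shell(str({'os': 'linux', 'rack': '0'}))
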
def run(self, params, args):
    # lambda function to return the first element in a tuple.
    # ("hostname", ) is the tuple that the sql statement
    # returns, and we want just "hostname"
    f = lambda x: x[0]

    if len(args) > 0:
        hosts = self.getHostnames(args)
    else:
        self.db.execute('select distinct nodes.name '
            'from zfs_replication, nodes where '
            'zfs_replication.local_host=nodes.id')
        hosts = self.db.fetchall()
        hosts = map(f, hosts)

    for host in hosts:
        self.runPlugins(host)

    # Select all the replication servers, and try to set permissions
    # on their backup filesystems. Do this only for hosts that are under
    # our control. Since we cannot run the list of remote hosts
    # through the getHostnames function, we have to try to log in
    # to every host without a password and hope the connections succeed.
    self.db.execute('select remote_host, remote_fs from '
        'zfs_replication group by remote_host, remote_fs')
    rows = self.db.fetchall()

    cmd_line = '/opt/rocks/thumper/zfs-perms-setup.sh'
    ssh_cmd = 'ssh -xT -o NumberOfPasswordPrompts=0'

    threads = []
    for (host, fs) in rows:
        cmd = '%s %s "%s %s"' % (ssh_cmd, host, cmd_line, fs)

        p = Parallel(cmd, host)
        threads.append(p)
        p.start()

    for thread in threads:
        thread.join(timeout)

def run(self, params, args):
    hosts = self.getHostnames(args, managed_only=1)
    localhost = self.getHostnames(["localhost"])[0]

    threads = []
    for host in hosts:
        #
        # get the attributes for the host
        #
        attrs = self.db.getHostAttrs(host)

        cmd = '/opt/rocks/bin/rocks report host firewall '
        cmd += '%s | ' % host
        cmd += '/opt/rocks/bin/rocks report script '
        cmd += 'attrs="%s" | ' % attrs
        cmd += self.getExecCommand(host, localhost)

        p = Parallel(cmd, host)
        threads.append(p)
        p.start()

    #
    # collect the threads
    #
    for thread in threads:
        thread.join(timeout)

    threads = []
    for host in hosts:
        cmd = 'echo "/sbin/service iptables restart > /dev/null 2>&1" |'
        cmd += self.getExecCommand(host, localhost)

        p = Parallel(cmd, host)
        threads.append(p)
        p.start()

    #
    # collect the threads
    #
    for thread in threads:
        thread.join(timeout)

    self.runPlugins(hosts)

def run(self, params, args):
    hosts = self.getHostnames(args, managed_only=1)

    threads = []
    for host in hosts:
        #
        # get the attributes for the host
        #
        attrs = self.db.getHostAttrs(host)

        cmd = "/opt/rocks/bin/rocks report host firewall "
        cmd += "%s | " % host
        cmd += "/opt/rocks/bin/rocks report script "
        cmd += 'attrs="%s" | ' % attrs
        cmd += "ssh -x %s bash > /dev/null 2>&1" % host

        p = Parallel(cmd, host)
        threads.append(p)
        p.start()

    #
    # collect the threads
    #
    for thread in threads:
        thread.join(timeout)

    threads = []
    for host in hosts:
        cmd = 'ssh -T -x %s "/sbin/service iptables restart ' % host
        cmd += '> /dev/null 2>&1" '

        p = Parallel(cmd, host)
        threads.append(p)
        p.start()

    #
    # collect the threads
    #
    for thread in threads:
        thread.join(timeout)

    self.runPlugins(hosts)

def run(self, params, args):
    hosts = self.getHostnames(args, managed_only=1)

    attr_dict = {}

    # Index all available plugins by attribute
    plugin_dict = {}

    # Path where plugins are stored
    plugin_path = '/opt/rocks/var/plugins/sec_attr'
    sys.path.append(plugin_path)

    # Read all plugins in plugin path
    for plugin_file in os.listdir(plugin_path):
        if not plugin_file.endswith('.py'):
            continue
        p = plugin_file.split('.py')[0]

        # Import the plugin
        plugin = __import__(p).plugin()

        # Get the attribute that the plugin
        # will run on, and append it to the
        # plugin dictionary
        plugin_dict[plugin.get_sec_attr()] = plugin_file

    # create a known hosts temporary file
    # this is so we don't contaminate the regular known hosts file
    # since this sync might change the host keys.
    (khfid, khfname) = tempfile.mkstemp()

    # Sync the secure information with the plugins
    threads = []
    for host in hosts:
        # Get a list of all secure attributes
        # of the host
        a_d = self.db.getHostSecAttrs(host)
        for a in a_d:
            # For each attribute, get the value
            value = a_d[a][0]

            # check if there is a plugin that
            # acts on the attribute
            plugin = None
            if plugin_dict.has_key(a):
                plugin_f = open(os.path.join(
                    plugin_path, plugin_dict[a]), 'r')
                plugin = base64.b64encode(plugin_f.read())
                plugin_f.close()

            if plugin is not None:
                attr_dict[a] = (value, plugin)

        # Once we have the attribute, value, and plugin,
        # pickle the information for the host
        (fid, fname) = tempfile.mkstemp()
        f = open(fname, 'w')
        pickle.dump(attr_dict, f)
        f.close()

        # Three commands are run in a single thread so that
        # we perform them as an atomic operation.

        # Ship the pickled file to the destination host
        cmd = 'scp -q -o UserKnownHostsFile=%s %s %s:%s; ' % \
            (khfname, fname, host, fname)

        # Remove the file from the frontend
        cmd = cmd + 'rm -rf %s; ' % (fname)

        # Run the unpickle procedure and run the plugin
        cmd = cmd + 'ssh -o UserKnownHostsFile=%s %s ' % (khfname, host) + \
            '"/opt/rocks/bin/rocks ' + \
            'run host sec_attr %s"' % (fname)

        p = Parallel(cmd, host)
        p.start()
        threads.append(p)

    for thread in threads:
        thread.join(timeout)

    if os.path.exists(khfname):
        os.unlink(khfname)

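# The plugin loader above only assumes that each module in
# /opt/rocks/var/plugins/sec_attr exposes a class named 'plugin' with a
# get_sec_attr() method naming the secure attribute it handles; the payload
# shipped to each node is a pickled dict of attr -> (value, base64-encoded
# plugin source). Below is a hypothetical plugin file, purely to illustrate
# that interface; the attribute name, the run() method, and its signature
# are assumptions, and the real plugin base class may differ.
class plugin(object):
    def get_sec_attr(self):
        # the secure attribute this plugin acts on
        return 'example_attr'

    def run(self, value):
        # invoked on the node with the attribute's value; a no-op in
        # this sketch
        pass
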
def run(self, params, args):
    hosts = self.getHostnames(args, managed_only=1)
    localhost = self.getHostnames(["localhost"])[0]

    threads = []
    for host in hosts:
        #
        # get the attributes for the host
        #
        attrs = self.db.getHostAttrs(host)

        exec_statement = self.getExecCommand(host, localhost)

        cmd = "/opt/rocks/bin/rocks report host interface "
        cmd += "%s | " % host
        cmd += "/opt/rocks/bin/rocks report script "
        cmd += 'attrs="%s" | ' % attrs
        cmd += exec_statement

        cmd += "; /opt/rocks/bin/rocks report host network "
        cmd += "%s | " % host
        cmd += "/opt/rocks/bin/rocks report script "
        cmd += 'attrs="%s" | ' % attrs
        cmd += exec_statement

        cmd += "; /opt/rocks/bin/rocks report host route "
        cmd += "%s | " % host
        cmd += "/opt/rocks/bin/rocks report script "
        cmd += 'attrs="%s" | ' % attrs
        cmd += exec_statement

        p = Parallel(cmd, host)
        threads.append(p)
        p.start()

    #
    # collect the threads
    #
    for thread in threads:
        thread.join(timeout)

    self.command("sync.host.firewall", hosts)

    self.runPlugins(hosts)

    #
    # after all the configuration files have been rewritten,
    # restart the network
    #
    threads = []
    for host in hosts:
        cmd = 'echo "/sbin/service network restart > /dev/null 2>&1" |'
        cmd += self.getExecCommand(host, localhost)

        p = Parallel(cmd, host)
        threads.append(p)
        p.start()

    #
    # collect the threads
    #
    for thread in threads:
        thread.join(timeout)

    #
    # if IP addresses change, we'll need to sync the config (e.g.,
    # update /etc/hosts, /etc/dhcpd.conf, etc.).
    #
    self.command("sync.config")

    #
    # hack for ganglia on the frontend
    #
    if self.db.getHostname("localhost") in hosts and \
            os.path.exists("/etc/ganglia/gmond.conf"):
        self.command("run.host",
            ["localhost", "service gmond restart > /dev/null 2>&1"])

def run(self, params, args):
    hosts = self.getHostnames(args, managed_only=1)
    localhost = self.getHostnames(["localhost"])[0]

    threads = []
    for host in hosts:
        #
        # get the attributes for the host
        #
        attrs = self.db.getHostAttrs(host)
        attrs = rocks.util.escapeStringForShell(str(attrs))

        exec_statement = self.getExecCommand(host, localhost)

        cmd = '/opt/rocks/bin/rocks report host interface '
        cmd += '%s | ' % host
        cmd += '/opt/rocks/bin/rocks report script '
        cmd += 'attrs="%s" | ' % attrs
        cmd += exec_statement

        cmd += '; /opt/rocks/bin/rocks report host network '
        cmd += '%s | ' % host
        cmd += '/opt/rocks/bin/rocks report script '
        cmd += 'attrs="%s" | ' % attrs
        cmd += exec_statement

        cmd += '; /opt/rocks/bin/rocks report host route '
        cmd += '%s | ' % host
        cmd += '/opt/rocks/bin/rocks report script '
        cmd += 'attrs="%s" | ' % attrs
        cmd += exec_statement

        p = Parallel(cmd, host)
        threads.append(p)
        p.start()

    #
    # collect the threads
    #
    for thread in threads:
        thread.join(timeout)

    self.command('sync.host.firewall', hosts)

    self.runPlugins(hosts)

    #
    # after all the configuration files have been rewritten,
    # restart the network
    #
    threads = []
    for host in hosts:
        cmd = 'echo "/sbin/service network restart > /dev/null 2>&1" |'
        cmd += self.getExecCommand(host, localhost)

        p = Parallel(cmd, host)
        threads.append(p)
        p.start()

    #
    # collect the threads
    #
    for thread in threads:
        thread.join(timeout)

    #
    # if IP addresses change, we'll need to sync the config (e.g.,
    # update /etc/hosts, /etc/dhcpd.conf, etc.).
    #
    self.command('sync.config')

    #
    # hack for ganglia on the frontend
    #
    if self.db.getHostname('localhost') in hosts and \
            os.path.exists('/etc/ganglia/gmond.conf'):
        self.command('run.host',
            ['localhost', 'service gmond restart > /dev/null 2>&1'])

def run(self, params, args):
    hosts = self.getHostnames(args, managed_only=1)

    threads = []
    for host in hosts:
        #
        # get the attributes for the host
        #
        attrs = self.db.getHostAttrs(host)

        cmd = '/opt/rocks/bin/rocks report host interface '
        cmd += '%s | ' % host
        cmd += '/opt/rocks/bin/rocks report script '
        cmd += 'attrs="%s" | ' % attrs
        cmd += 'ssh -T -x %s bash > /dev/null 2>&1 ' % host

        cmd += '; /opt/rocks/bin/rocks report host network '
        cmd += '%s | ' % host
        cmd += '/opt/rocks/bin/rocks report script '
        cmd += 'attrs="%s" | ' % attrs
        cmd += 'ssh -T -x %s bash > /dev/null 2>&1 ' % host

        cmd += '; /opt/rocks/bin/rocks report host route '
        cmd += '%s | ' % host
        cmd += '/opt/rocks/bin/rocks report script '
        cmd += 'attrs="%s" | ' % attrs
        cmd += 'ssh -T -x %s bash > /dev/null 2>&1 ' % host

        p = Parallel(cmd, host)
        threads.append(p)
        p.start()

    #
    # collect the threads
    #
    for thread in threads:
        thread.join(timeout)

    self.command('sync.host.firewall', hosts)

    self.runPlugins(hosts)

    #
    # after all the configuration files have been rewritten,
    # restart the network
    #
    threads = []
    for host in hosts:
        cmd = 'ssh %s "/sbin/service network restart ' % host
        cmd += '> /dev/null 2>&1" '

        p = Parallel(cmd, host)
        threads.append(p)
        p.start()

    #
    # collect the threads
    #
    for thread in threads:
        thread.join(timeout)

    #
    # if IP addresses change, we'll need to sync the config (e.g.,
    # update /etc/hosts, /etc/dhcpd.conf, etc.).
    #
    self.command('sync.config')

    #
    # hack for ganglia on the frontend
    #
    if self.db.getHostname('localhost') in hosts and \
            os.path.exists('/etc/ganglia/gmond.conf'):
        self.command('run.host',
            ['localhost', 'service gmond restart > /dev/null 2>&1'])