Example #1
 def nodes_check_access(self):
     self.logger.debug('checking if nodes are accessible')
     run_items = []
     for key, node in self.selected_nodes.items():
         run_items.append(tools.RunItem(target=node.check_access, key=key))
     result = tools.run_batch(run_items, self.maxthreads, dict_result=True)
     for key in result:
         self.nodes[key].accessible = result[key]
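All ten examples in this listing follow the same pattern: build a list of tools.RunItem objects, hand them to tools.run_batch, and (with dict_result=True) read the results back as a dict keyed by each item's key. The helper below is only a minimal sketch of that assumed contract, using a thread pool from multiprocessing.dummy; the project's real tools module may be implemented differently.

 from multiprocessing.dummy import Pool  # thread-backed pool, no pickling needed


 class RunItem(object):
     def __init__(self, target, args=None, key=None):
         self.target = target    # callable to execute (usually a Node method)
         self.args = args or {}  # keyword arguments for the callable
         self.key = key          # identifies the result when dict_result=True


 def run_batch(item_list, maxthreads, dict_result=False):
     # Run every item's target concurrently, at most maxthreads at a time.
     pool = Pool(processes=max(1, min(maxthreads, len(item_list) or 1)))
     try:
         results = pool.map(lambda i: i.target(**i.args), item_list)
     finally:
         pool.close()
         pool.join()
     if dict_result:
         return dict((i.key, r) for i, r in zip(item_list, results))
     return results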
Example #2
 def nodes_get_os(self):
     run_items = []
     for key, node in self.selected_nodes.items():
         if not node.os_platform:
             run_items.append(tools.RunItem(target=node.get_os, key=key))
     result = tools.run_batch(run_items, self.maxthreads, dict_result=True)
     for key in result:
         if result[key]:
             self.nodes[key].os_platform = result[key]
Example #3
 def nodes_get_roles_hiera(self, maxthreads=100):
     run_items = []
     for key, node in self.selected_nodes.items():
         if node.status != 'discover' and not node.roles:
             run_items.append(tools.RunItem(target=node.get_roles_hiera,
                                            key=key))
     result = tools.run_batch(run_items, maxthreads, dict_result=True)
     for key in result:
         if result[key]:
             self.nodes[key].roles = result[key]
Example #4
 def nodes_get_cluster_ids(self, maxthreads=100):
     self.logger.debug('getting cluster ids from nodes')
     run_items = []
     for key, node in self.selected_nodes.items():
         if not node.cluster:
             run_items.append(tools.RunItem(target=node.get_cluster_id,
                                            key=key))
     result = tools.run_batch(run_items, maxthreads, dict_result=True)
     for key in result:
         if result[key] is not None:
             self.nodes[key].cluster = result[key]
Example #5
 def run_commands(self, timeout=15, fake=False):
     run_items = []
     for key, node in self.selected_nodes.items():
         run_items.append(
             tools.RunItem(target=node.exec_cmd,
                           args={'fake': fake},
                           key=key))
     result = tools.run_batch(run_items, self.maxthreads, dict_result=True)
     for key in result:
         self.nodes[key].mapcmds = result[key][0]
         self.nodes[key].mapscr = result[key][1]
Example #6
 def get_release_cli(self):
     run_items = []
     for key, node in self.selected_nodes.items():
         run_items.append(tools.RunItem(target=node.get_release,
                                        key=key))
     result = tools.run_batch(run_items, 100, dict_result=True)
     if result:
         for key in result:
             self.nodes[key].release = result[key]
         return True
     else:
         return False
Example #7
 def get_logs(self, timeout, fake=False):
     if fake:
         self.logger.info('fake = True, skipping')
         return
     if self.conf['logs_speed_limit']:
         if self.conf['logs_speed'] > 0:
             speed = self.conf['logs_speed']
         else:
             speed = self.find_adm_interface_speed()
         speed = int(speed * 0.9 /
                     min(self.logs_maxthreads, len(self.nodes)))
         py_slowpipe = tools.slowpipe % speed
         limitcmd = "| python -c '%s'; exit ${PIPESTATUS}" % py_slowpipe
     run_items = []
     for node in self.selected_nodes.values():
         if not node.logs_dict():
             self.logger.info(("%s: no logs to collect") % node.repr)
             continue
         node.archivelogsfile = os.path.join(self.conf['archive_dir'],
                                             'logs-%s.tar.gz' % node.repr)
         tools.mdir(self.conf['archive_dir'])
         input = ''
         for fn in node.logs_dict():
             input += '%s\0' % fn.lstrip(os.path.abspath(os.sep))
         cmd = ("tar --transform 's,^,%s/,' --gzip -C %s --create "
                "--warning=no-file-changed --file - --null --files-from -" %
                (node.repr, os.path.abspath(os.sep)))
         if self.conf['logs_speed_limit']:
             if not (node.ip == 'localhost' or node.ip.startswith('127.')):
                 cmd = ' '.join([cmd, limitcmd])
         args = {
             'cmd': cmd,
             'timeout': timeout,
             'outfile': node.archivelogsfile,
             'input': input,
             'ok_codes': [0, 1]
         }
         run_items.append(
             tools.RunItem(target=node.exec_simple_cmd, args=args))
     tools.run_batch(run_items, self.logs_maxthreads)
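tools.slowpipe above is formatted with a bytes-per-second value and executed via python -c on the remote side to throttle the tar stream; exit ${PIPESTATUS} propagates tar's own exit code through the pipe. The template's contents are not shown in this listing, so the following is only a hypothetical sketch of such a rate-limiting pass-through filter (Python 3 assumed):

 import sys
 import time


 def slowpipe(limit_bps, chunk=65536):
     # Copy stdin to stdout, sleeping as needed to keep the average
     # throughput close to limit_bps bytes per second.
     start = time.time()
     sent = 0
     while True:
         data = sys.stdin.buffer.read(chunk)
         if not data:
             break
         sys.stdout.buffer.write(data)
         sys.stdout.buffer.flush()
         sent += len(data)
         ahead = sent / float(limit_bps) - (time.time() - start)
         if ahead > 0:
             time.sleep(ahead)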
Example #8
 def run_scripts_all_pairs(self, fake=False):
     nodes = self.selected_nodes.values()
     max_pairs = self.conf['scripts_all_pairs_max_pairs']
     if len(nodes) < 2:
         self.logger.warning('less than 2 nodes are available, '
                             'skipping paired scripts')
         return
     run_server_start_items = []
     run_server_stop_items = []
     for n in nodes:
         start_args = {'phase': 'server_start', 'fake': fake}
         run_server_start_items.append(
             tools.RunItem(target=n.exec_pair, args=start_args, key=n.ip))
         stop_args = {'phase': 'server_stop', 'fake': fake}
         run_server_stop_items.append(
             tools.RunItem(target=n.exec_pair, args=stop_args))
     result = tools.run_batch(run_server_start_items,
                              self.maxthreads,
                              dict_result=True)
     for key in result:
         self.nodes[key].scripts_all_pairs = result[key]
     one_way = self.conf['scripts_all_pairs_one_way']
     chain = tools.all_pairs(nodes, one_way=one_way, max_pairs=max_pairs)
     for pairset in chain:
         run_client_items = []
         self.logger.info(['%s->%s' % (p[0].ip, p[1].ip) for p in pairset])
         for pair in pairset:
             client = pair[0]
             server = pair[1]
             client_args = {
                 'phase': 'client',
                 'server_node': server,
                 'fake': fake
             }
             run_client_items.append(
                 tools.RunItem(target=client.exec_pair, args=client_args))
         tools.run_batch(run_client_items, len(run_client_items))
     tools.run_batch(run_server_stop_items, self.maxthreads)
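tools.all_pairs is assumed here to yield successive batches of (client, server) pairs in which no node appears more than once, so each batch can run fully in parallel; one_way drops the reverse direction and max_pairs caps the batch size. A greedy sketch of that assumed behaviour, purely for illustration:

 import itertools


 def all_pairs(nodes, one_way=False, max_pairs=None):
     # Yield batches of (client, server) pairs; within one batch every node
     # takes part in at most one pair, so the whole batch can run concurrently.
     nodes = list(nodes)
     if one_way:
         pending = list(itertools.combinations(nodes, 2))
     else:
         pending = list(itertools.permutations(nodes, 2))
     while pending:
         batch, busy, rest = [], set(), []
         for client, server in pending:
             full = max_pairs is not None and len(batch) >= max_pairs
             if not full and client not in busy and server not in busy:
                 batch.append((client, server))
                 busy.update((client, server))
             else:
                 rest.append((client, server))
         pending = rest
         yield batch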
Example #9
 def calculate_log_size(self, timeout=15):
     total_size = 0
     run_items = []
     for key, node in self.selected_nodes.items():
         run_items.append(
             tools.RunItem(target=node.logs_populate,
                           args={'timeout': timeout},
                           key=key))
     result = tools.run_batch(run_items, self.maxthreads, dict_result=True)
     for key in result:
         self.nodes[key].logs = result[key]
     for node in self.selected_nodes.values():
         total_size += sum(node.logs_dict().values())
     self.logger.info('Full log size on nodes(with fuel): %d bytes' %
                      total_size)
     self.alogsize = total_size
     return self.alogsize
Example #10
 def put_files(self):
     run_items = []
     for node in self.selected_nodes.values():
         run_items.append(tools.RunItem(target=node.put_files))
     tools.run_batch(run_items, 10)