def setup(self):
    """Reboot the device, then optionally wait for it to become usable.

    When the ``mcpd`` spec flag is truthy, block until mcpd is back up
    after the reboot; when the ``restjavad`` spec flag is truthy, also
    wait for restjavad to come back.
    """
    super(RebootStage, self).setup()
    device = self.device
    LOG.info('Reboot stage for: %s', device)
    SCMD.ssh.reboot(device=device)

    specs = self.specs
    # Each wait is opt-in: the spec key must be present *and* truthy.
    if specs.mcpd and _bool(specs.mcpd):
        self._wait_after_reboot(device)
    if specs.restjavad and _bool(specs.restjavad):
        RCMD.system.wait_restjavad([device])
def setup(self):
    """Apply assorted debug/maintenance tweaks to the device.

    Each tweak is driven by a key in ``self.specs`` and is independent
    of the others: mcp / icontrol debug logging, forced logrotate,
    restjavad FINEST logging, scp file transfer and arbitrary shell
    commands.

    Raises:
        ValueError: when the ``scp`` spec names a method other than
            ``get`` or ``put``.
    """
    super(TweaksStage, self).setup()
    LOG.info('Tweaks stage for: %s', self.device)
    specs = self.specs

    def enabled(value):
        # A tweak runs only when its flag is present *and* truthy.
        return value and _bool(value)

    # mcp: enable MCPD debug logging.
    if enabled(specs.mcp):
        with SSHInterface(device=self.device) as ssh:
            ssh.api.run('setdb log.mcpd.level debug')

    # icontrol: enable iControl debug logging (needs an httpd restart).
    if enabled(specs.icontrol):
        with SSHInterface(device=self.device) as ssh:
            ssh.api.run('setdb icontrol.loglevel debug')
            ssh.api.run('bigstart restart httpd')

    # logrotate: force-rotate all logs now.
    if enabled(specs.logrotate):
        with SSHInterface(device=self.device) as ssh:
            ssh.api.run('/usr/sbin/logrotate /etc/logrotate.conf -f')
            # BIG-IQ does not use the usual logrotate machinery for
            # restjavad logs.  Restarting restjavad here used to clear
            # them, but that caused discovery errors (07/24 IT), so it
            # is deliberately not done.

    # log_finest: force restjavad log files to use FINEST level.
    if enabled(specs.log_finest):
        with SSHInterface(device=self.device) as ssh:
            ssh.api.run("sed -i 's/^.level=.*/.level=FINEST/g' /etc/restjavad.log.conf && bigstart restart restjavad")

    # scp: copy files from (get) or to (put) the device.
    if specs.scp:
        scp_spec = specs.scp
        # A list of sources collapses to one space-separated string.
        if isinstance(scp_spec.source, basestring):
            source = scp_spec.source
        else:
            source = ' '.join(scp_spec.source)
        method = scp_spec.method.lower()
        with SSHInterface(device=self.device) as ssh:
            if method == 'get':
                SCMD.ssh.scp_get(ifc=ssh, source=source,
                                 destination=scp_spec.destination,
                                 nokex=True)
            elif method == 'put':
                SCMD.ssh.scp_put(ifc=ssh, source=source,
                                 destination=scp_spec.destination,
                                 nokex=True)
            else:
                raise ValueError("Unknown scp method: %s" % scp_spec.method)

    # shell: execute shell command(s), logging each command's output.
    if specs.shell:
        if isinstance(specs.shell, basestring):
            shell_cmds = [specs.shell]
        else:
            shell_cmds = specs.shell
        with SSHInterface(device=self.device) as ssh:
            for cmd in shell_cmds:
                LOG.debug(ssh.api.run(cmd))
def cfgToArg(self, optname, value):
    """Translate one config entry into command-line argv tokens.

    Boolean-looking values become a bare ``--optname`` switch, emitted
    only when the value is truthy and dropped entirely when falsy;
    any other value is passed through as ``--optname value``.
    """
    option = '--' + optname
    if not flag(value):
        # Regular valued option: pass the value through verbatim.
        return [option, value]
    # Flag-style option: present only when the value is truthy.
    return [option] if _bool(value) else []
def cfgToArg(self, optname, value):
    """Translate one config entry into command-line argv tokens.

    Looks the option up in the parser: ``store_true``/``store_false``
    options are emitted as a bare ``--optname`` switch (only when the
    value is truthy) and must be given a boolean-looking value; every
    other option is emitted as ``--optname value``.

    Raises:
        ValueError: when a store_true/store_false option is given a
            value that does not look boolean.
    """
    long_optname = '--' + optname
    option = self.__parser.get_option(long_optname)
    if option.action not in ('store_true', 'store_false'):
        # Regular valued option: pass the value through verbatim.
        return [long_optname, value]
    if not flag(value):
        raise ValueError("Invalid value '%s' for '%s'" % (
            value, optname))
    # Boolean switch: present only when the value is truthy.
    return [long_optname] if _bool(value) else []
def process_stages(stages, section, context, stop_on_error=True):
    """Run all enabled stages found under *section* of the *stages* tree.

    Stages are sorted by priority then name, bucketed into groups (by
    GROUP_KEY, defaulting to "<name>-<type>"), and each group is run to
    completion before the next starts.  Within a group, one MacroThread
    is spawned per target device; thread failures are collected through
    a queue and re-raised as StageError when *stop_on_error* is true.

    :param stages: nested Options/dict tree of stage definitions.
    :param section: dotted path selecting the subtree to process.
    :param context: opaque object attached to each stage's parameters
        as ``_context``.
    :param stop_on_error: when true, raise StageError after a group if
        any of its threads failed; otherwise only log the failures.
    :raises StageError: when *stop_on_error* is set and a stage failed.
    """
    if not stages:
        LOG.debug('No stages found.')
        return

    # Replicate the "_enabled" flag.
    carry_flag(stages)

    # Build the stage map with *ALL* defined stage classes in this file.
    stages_map = {}
    for value in globals().values():
        if inspect.isclass(value) and issubclass(value, Stage) and value != Stage:
            stages_map[value.name] = value

    # Focus only on our stages section
    for key in section.split('.'):
        stages = stages.get(key, Options())

    # Sort stages by priority attribute and stage name.
    # Non-dict entries sort with key False (== 0), i.e. before any
    # positive priority.
    stages = sorted(stages.iteritems(),
                    key=lambda x: (isinstance(x[1], dict) and
                                   x[1].get(PRIORITY_KEY, DEFAULT_PRIORITY),
                                   x[0]))

    config = ConfigInterface().config

    # Group stages of the same type. Then we spin up one thread per stage in a
    # group and wait for threads within a group to finish.
    sg_dict = {}
    sg_list = []
    for name, specs in stages:
        # Skip empty specs and private ("_"-prefixed) entries.
        if not specs or name.startswith('_'):
            continue
        assert TYPE_KEY in specs, "%s stage is invalid. No type specified." % name
        specs = Options(specs)
        # Stages sharing a GROUP_KEY run in the same group; otherwise
        # each name/type pair forms its own group.
        key = specs.get(GROUP_KEY, "{0}-{1}".format(name, specs[TYPE_KEY]))
        group = sg_dict.get(key)
        if not group:
            sg_dict[key] = []
            sg_list.append(sg_dict[key])
        sg_dict[key].append((name, specs))

    LOG.debug("sg_list: %s", sg_list)
    for stages in sg_list:
        # Per-group thread pool and failure queue.
        q = Queue()
        pool = []
        for stage in stages:
            description, specs = stage
            if not specs or not _bool(specs.get(ENABLE_KEY)):
                continue
            LOG.info("Processing stage: %s", description)
            # items() reverts <Options> to a simple <dict>
            specs = Options(specs)
            if not stages_map.get(specs[TYPE_KEY]):
                LOG.warning("Stage '%s' (%s) not defined.", description,
                            specs[TYPE_KEY])
                continue
            stage_class = stages_map[specs[TYPE_KEY]]
            parameters = specs.get(PARAMETERS_KEY) or Options()
            parameters._context = context
            devices = expand_devices(specs)
            if devices is None:
                # Device-less stage: run synchronously in this thread.
                stage_class(parameters).run()
            elif devices == []:
                LOG.error("Stage %s requires devices but found none" % description)
            else:
                if not devices:
                    LOG.warning('No devices found for stage %s', description)
                for device in devices:
                    stage = stage_class(device, parameters)
                    name = '%s :: %s' % (description, device.alias) if device else description
                    t = MacroThread(stage, q, name=name, config=config)
                    t.start()
                    pool.append(t)
                    # Serialize when either the stage class or its spec
                    # forbids parallel execution.
                    if not stage_class.parallelizable or not specs.get('parallelizable', True):
                        t.join()
                    # Cap the number of parallel threads
                    if len(pool) >= specs.get('threads', MAX_THREADS):
                        map(lambda x: x.join(), pool)
                        pool[:] = []

        LOG.debug('Waiting for threads...')
        for t in pool:
            t.join()

        # Drain the failure queue: each entry maps a thread to its
        # exc_info tuple.
        if not q.empty():
            stages = []
            while not q.empty():
                ret = q.get(block=False)
                thread, exc_info = ret.popitem()
                stages.append((thread, exc_info))
                LOG.error('Exception while "%s"', thread.getName())
                for line in traceback.format_exception(*exc_info):
                    LOG.error(line.strip())
            if stop_on_error:
                raise StageError(stages)
def process_stages(stages, section, context, stop_on_error=True):
    """Run all enabled stages found under *section* of the *stages* tree.

    Stages are sorted by priority then name, bucketed into groups (by
    GROUP_KEY, defaulting to "<name>-<type>"), and each group is run to
    completion before the next starts.  Within a group, one MacroThread
    is spawned per target device (device order may be shuffled when the
    spec sets ``shuffle``); thread failures are collected through a
    queue and re-raised as StageError when *stop_on_error* is true.

    :param stages: nested Options/dict tree of stage definitions.
    :param section: dotted path selecting the subtree to process.
    :param context: opaque object attached to each stage's parameters
        as ``_context``.
    :param stop_on_error: when true, raise StageError after a group if
        any of its threads failed; otherwise only log the failures.
    :raises StageError: when *stop_on_error* is set and a stage failed.
    """
    if not stages:
        LOG.debug('No stages found.')
        return

    # Replicate the "_enabled" flag.
    carry_flag(stages)

    # Build the stage map with *ALL* defined stage classes in this file.
    stages_map = {}
    for value in globals().values():
        if inspect.isclass(value) and issubclass(value, Stage) and value != Stage:
            stages_map[value.name] = value

    # Focus only on our stages section
    for key in section.split('.'):
        stages = stages.get(key, Options())

    # Sort stages by priority attribute and stage name.
    # Non-dict entries sort with key False (== 0), i.e. before any
    # positive priority.
    stages = sorted(stages.iteritems(),
                    key=lambda x: (isinstance(x[1], dict) and
                                   x[1].get(PRIORITY_KEY, DEFAULT_PRIORITY),
                                   x[0]))

    # NOTE(review): config is never used below (MacroThread is created
    # without it here); kept because ConfigInterface() may have needed
    # side effects — confirm before removing.
    config = ConfigInterface().config

    # Group stages of the same type. Then we spin up one thread per stage in a
    # group and wait for threads within a group to finish.
    sg_dict = {}
    sg_list = []
    for name, specs in stages:
        # Skip empty specs and private ("_"-prefixed) entries.
        if not specs or name.startswith('_'):
            continue
        assert TYPE_KEY in specs, "%s stage is invalid. No type specified." % name
        specs = Options(specs)
        # Stages sharing a GROUP_KEY run in the same group; otherwise
        # each name/type pair forms its own group.
        key = specs.get(GROUP_KEY, "{0}-{1}".format(name, specs[TYPE_KEY]))
        group = sg_dict.get(key)
        if not group:
            sg_dict[key] = []
            sg_list.append(sg_dict[key])
        sg_dict[key].append((name, specs))

    LOG.debug("sg_list: %s", sg_list)
    for stages in sg_list:
        # Per-group thread pool and failure queue.
        q = Queue()
        pool = []
        for stage in stages:
            description, specs = stage
            if not specs or not _bool(specs.get(ENABLE_KEY)):
                continue
            LOG.info("Processing stage: %s", description)
            # items() reverts <Options> to a simple <dict>
            specs = Options(specs)
            if not stages_map.get(specs[TYPE_KEY]):
                LOG.warning("Stage '%s' (%s) not defined.", description,
                            specs[TYPE_KEY])
                continue
            stage_class = stages_map[specs[TYPE_KEY]]
            parameters = specs.get(PARAMETERS_KEY) or Options()
            parameters._context = context
            devices = expand_devices(specs)
            if devices is None:
                # Device-less stage: run synchronously in this thread.
                stage_class(parameters).run()
            elif devices == []:
                LOG.error("Stage %s requires devices but found none" % description)
            else:
                if not devices:
                    LOG.warning('No devices found for stage %s', description)
                # Optionally randomize device order for this stage.
                if specs.get('shuffle', False):
                    random.shuffle(devices)
                for device in devices:
                    stage = stage_class(device, parameters)
                    name = '%s :: %s' % (description, device.alias) if device else description
                    t = MacroThread(stage, q, name=name)
                    t.start()
                    pool.append(t)
                    # Serialize when either the stage class or its spec
                    # forbids parallel execution.
                    if not stage_class.parallelizable or not specs.get('parallelizable', True):
                        t.join()
                    # Cap the number of parallel threads
                    if len(pool) >= specs.get('threads', MAX_THREADS):
                        map(lambda x: x.join(), pool)
                        pool[:] = []

        LOG.debug('Waiting for threads...')
        for t in pool:
            t.join()

        # Drain the failure queue: each entry maps a thread to its
        # exc_info tuple.
        if not q.empty():
            stages = []
            while not q.empty():
                ret = q.get(block=False)
                thread, exc_info = ret.popitem()
                stages.append((thread, exc_info))
                LOG.error('Exception while "%s"', thread.getName())
                for line in traceback.format_exception(*exc_info):
                    LOG.error(line.strip())
            if stop_on_error:
                raise StageError(stages)