def _wait(mutable):
    try:
        # Only issue power change command once
        if mutable['iter'] < 0:
            xcat_util.exec_xcatcmd(driver_info, 'rpower', state_name)
        else:
            mutable['power'] = _power_status(driver_info)
    except Exception:
        # Log failures but keep trying
        LOG.warning(_("xcat rpower %(state)s failed for node %(node)s."),
                    {'state': state_name, 'node': driver_info['uuid']})
    finally:
        mutable['iter'] += 1

    if mutable['power'] == target_state:
        raise loopingcall.LoopingCallDone()

    sleep_time = _sleep_time(mutable['iter'])
    if (sleep_time + mutable['total_time']) > CONF.ipmi.retry_timeout:
        # Stop if the next loop would exceed maximum retry_timeout
        LOG.error(_('xcat rpower %(state)s timed out after '
                    '%(tries)s retries on node %(node_id)s.'),
                  {'state': state_name, 'tries': mutable['iter'],
                   'node_id': driver_info['uuid']})
        mutable['power'] = states.ERROR
        raise loopingcall.LoopingCallDone()
    else:
        mutable['total_time'] += sleep_time
        return sleep_time

def _wait(mutable):
    try:
        # Only issue power change command once
        if mutable['iter'] < 0:
            _exec_ipmitool(driver_info, "power %s" % state_name)
        else:
            mutable['power'] = _power_status(driver_info)
    except (exception.PasswordFileFailedToCreate,
            processutils.ProcessExecutionError,
            exception.IPMIFailure):
        # Log failures but keep trying
        LOG.warning(_LW("IPMI power %(state)s failed for node %(node)s."),
                    {'state': state_name, 'node': driver_info['uuid']})
    finally:
        mutable['iter'] += 1

    if mutable['power'] == target_state:
        raise loopingcall.LoopingCallDone()

    sleep_time = _sleep_time(mutable['iter'])
    if (sleep_time + mutable['total_time']) > CONF.ipmi.retry_timeout:
        # Stop if the next loop would exceed maximum retry_timeout
        LOG.error(_LE('IPMI power %(state)s timed out after '
                      '%(tries)s retries on node %(node_id)s.'),
                  {'state': state_name, 'tries': mutable['iter'],
                   'node_id': driver_info['uuid']})
        mutable['power'] = states.ERROR
        raise loopingcall.LoopingCallDone()
    else:
        mutable['total_time'] += sleep_time
        return sleep_time

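# NOTE: the two callbacks above return a sleep interval, which matches the
# contract of oslo.service's DynamicLoopingCall: the value returned by the
# callback sets the delay before the next call, and raising LoopingCallDone
# stops the loop. Below is a minimal, self-contained sketch of that driving
# pattern; _example_wait, its counter seed and the 1.0-second interval are
# illustrative assumptions, not code from the drivers above.
from oslo_service import loopingcall


def _example_wait(mutable):
    # hypothetical stand-in for _power_status()/_exec_ipmitool()
    mutable['iter'] += 1
    if mutable['iter'] >= 3:      # pretend the power state converged
        raise loopingcall.LoopingCallDone()
    return 1.0                    # sleep one second before the next poll


status = {'iter': -1}
loopingcall.DynamicLoopingCall(_example_wait, status).start().wait()
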
def _wait(node_uuid, popen_obj):
    locals['returncode'] = popen_obj.poll()

    # check if the console pid is created.
    # if it is, then the shellinaboxd is invoked successfully as a daemon.
    # otherwise check the error.
    if locals['returncode'] is not None:
        if locals['returncode'] == 0 and os.path.exists(pid_file):
            raise loopingcall.LoopingCallDone()
        else:
            (stdout, stderr) = popen_obj.communicate()
            locals['errstr'] = _("Command: %(command)s.\n"
                                 "Exit code: %(return_code)s.\n"
                                 "Stdout: %(stdout)r\n"
                                 "Stderr: %(stderr)r") % {
                                     'command': ' '.join(args),
                                     'return_code': locals['returncode'],
                                     'stdout': stdout,
                                     'stderr': stderr}
            LOG.warning(locals['errstr'])
            raise loopingcall.LoopingCallDone()

    if time.time() > expiration:
        locals['errstr'] = _("Timeout while waiting for console"
                             " subprocess to start for node %s.") % node_uuid
        LOG.warning(locals['errstr'])
        raise loopingcall.LoopingCallDone()

def _wait(status):
    status['power'] = _power_status(node)
    if status['power'] == target_state:
        raise loopingcall.LoopingCallDone()

    if status['iter'] >= CONF.amt.max_attempts:
        status['power'] = states.ERROR
        LOG.warning(_LW("AMT failed to set power state %(state)s after "
                        "%(tries)s retries on node %(node_id)s."),
                    {'state': target_state, 'tries': status['iter'],
                     'node_id': node.uuid})
        raise loopingcall.LoopingCallDone()

    try:
        _set_power_state(node, target_state)
    except Exception:
        # Log failures but keep trying
        LOG.warning(_LW("AMT set power state %(state)s for node %(node)s "
                        "- Attempt %(attempt)s times of %(max_attempt)s "
                        "failed."),
                    {'state': target_state, 'node': node.uuid,
                     'attempt': status['iter'] + 1,
                     'max_attempt': CONF.amt.max_attempts})
    status['iter'] += 1

def _wait_for_deploy():
    out, err = xcat_util.exec_xcatcmd(driver_info, 'nodels',
                                      'nodelist.status')
    if err:
        locals['errstr'] = _("Error returned when querying node status"
                             " for node %s:%s") % (driver_info['xcat_node'],
                                                   err)
        LOG.warning(locals['errstr'])
        raise loopingcall.LoopingCallDone()

    if out:
        node, status = out.split(": ")
        status = status.strip()
        if status == "booted":
            LOG.info(_("Deployment for node %s completed.")
                     % driver_info['xcat_node'])
            raise loopingcall.LoopingCallDone()

    if (CONF.xcat.deploy_timeout and
            timeutils.utcnow() > expiration):
        locals['errstr'] = _("Timeout while waiting for"
                             " deployment of node %s.") % driver_info['xcat_node']
        LOG.warning(locals['errstr'])
        raise loopingcall.LoopingCallDone()

def _wait_for_disk_to_become_available(self, retries, max_retries, pids,
                                       stderr):
    retries[0] += 1
    if retries[0] > max_retries:
        raise loopingcall.LoopingCallDone()

    try:
        # NOTE(ifarkas): fuser returns a non-zero return code if none of
        #                the specified files is accessed
        out, err = utils.execute('fuser', self._device,
                                 check_exit_code=[0, 1], run_as_root=True)
        if not out and not err:
            raise loopingcall.LoopingCallDone()
        else:
            if err:
                stderr[0] = err
            if out:
                pids_match = re.search(self._fuser_pids_re, out)
                pids[0] = pids_match.group()
    except processutils.ProcessExecutionError as exc:
        LOG.warning(_LW('Failed to check the device %(device)s with fuser:'
                        ' %(err)s'), {'device': self._device, 'err': exc})

def _wait_execution(mutable, channel):
    try:
        stdout_data = channel.recv(1048576)
    except Exception:
        LOG.debug('No data from SSH stdout.')
    else:
        LOG.debug('Got %d from SSH stdout.', len(stdout_data))
        stdout_io.write(stdout_data)

    try:
        stderr_data = channel.recv_stderr(1048576)
    except Exception:
        LOG.debug('No data from SSH stderr.')
    else:
        LOG.debug('Got %d from SSH stderr.', len(stderr_data))
        stderr_io.write(stderr_data)

    if channel.exit_status_ready():
        raise loopingcall.LoopingCallDone()

    try:
        ssh = utils.ssh_connect(ssh_params)
    except exception.SSHConnectFailed:
        mutable['error'] = True
        raise loopingcall.LoopingCallDone()
    else:
        ssh.close()

def _wait_for_deploy(): """Called at an interval until the deployment completes.""" try: row = db.bm_node_get(context, node['id']) if instance['uuid'] != row.get('instance_uuid'): locals['error'] = _("Node associated with another instance" " while waiting for deploy of %s") raise loopingcall.LoopingCallDone() status = row.get('task_state') if (status == states.DEPLOYING and locals['started'] is False): LOG.info( _("PXE deploy started for instance %s") % instance['uuid']) locals['started'] = True elif status in (states.DEPLOYDONE, states.ACTIVE): LOG.info( _("PXE deploy completed for instance %s") % instance['uuid']) raise loopingcall.LoopingCallDone() elif status == states.DEPLOYFAIL: locals['error'] = _("PXE deploy failed for instance %s") except exception.NodeNotFound: locals['error'] = _("Baremetal node deleted while waiting " "for deployment of instance %s") if (CONF.pxe_deploy_timeout and timeutils.utcnow() > expiration): locals['error'] = _("Timeout reached while waiting for " "PXE deploy of instance %s") if locals['error']: raise loopingcall.LoopingCallDone()
def _wait(state, retries):
    state[0] = ucs_power_handle.get_power_state()
    if ((retries[0] != 0) and
            (UCS_TO_IRONIC_POWER_STATE.get(state[0]) == target_state)):
        raise loopingcall.LoopingCallDone()

    if retries[0] > CONF.cisco_ucs.max_retry:
        state[0] = states.ERROR
        raise loopingcall.LoopingCallDone()
    retries[0] += 1

def _wait(state):
    state[0] = _get_power_state(node)

    # NOTE(rameshg87): For reboot operations, initially the state will be
    # the same as the final state. So defer the check for one retry.
    if retries[0] != 0 and state[0] == target_state:
        raise loopingcall.LoopingCallDone()

    if retries[0] > CONF.ilo.power_retry:
        state[0] = states.ERROR
        raise loopingcall.LoopingCallDone()
    retries[0] += 1

def _poll_for_state(mutable):
    """Called at an interval until the node's power is consistent.

    :param mutable: dict object containing "state" and "next_time"
    :raises: SNMPFailure if an SNMP request fails.
    """
    mutable["state"] = self._snmp_power_state()
    if mutable["state"] == goal_state:
        raise loopingcall.LoopingCallDone()

    mutable["next_time"] += self.retry_interval
    if mutable["next_time"] >= CONF.snmp.power_timeout:
        mutable["state"] = states.ERROR
        raise loopingcall.LoopingCallDone()

def _wait_for_power_off(state, retries): """Called at an interval until the node is powered off.""" state[0] = _get_power_status(node) if state[0] == states.POWER_OFF: raise loopingcall.LoopingCallDone() if retries[0] > CONF.seamicro.max_retry: state[0] = states.ERROR raise loopingcall.LoopingCallDone() try: retries[0] += 1 server.power_off() except seamicro_client_exception.ClientException: LOG.warning(_LW("Power-off failed for node %s."), node.uuid)
def _wait_for_power_off(): """Called at an interval until the node's power is off.""" self._update_state() if self.state == states.POWER_OFF: raise loopingcall.LoopingCallDone() if self.retries > CONF.ipmi_power_retry: self.state = states.ERROR raise loopingcall.LoopingCallDone() try: self.retries += 1 self._exec_ipmitool("power off") except Exception: LOG.exception(_("IPMI power off failed"))
def _wait_for_reboot(state, retries):
    """Called at an interval until the node is rebooted successfully."""
    state[0] = _get_power_status(node)
    if state[0] == states.POWER_ON:
        raise loopingcall.LoopingCallDone()

    if retries[0] > CONF.seamicro.max_retry:
        state[0] = states.ERROR
        raise loopingcall.LoopingCallDone()

    try:
        retries[0] += 1
        server.reset()
    except seamicro_client_exception.ClientException:
        LOG.warning(_LW("Reboot failed for node %s."), node.uuid)

def _wait_for_power_off(state, retries): """Called at an interval until the node's power is off.""" state[0] = _power_status(driver_info) if state[0] == states.POWER_OFF: raise loopingcall.LoopingCallDone() if retries[0] > CONF.ipmi_power_retry: state[0] = states.ERROR raise loopingcall.LoopingCallDone() try: retries[0] += 1 _exec_ipmitool(driver_info, "power off") except Exception: # Log failures but keep trying LOG.warning( _("IPMI power off failed for node %s.") % driver_info['uuid'])
def _wait_for_power_off(state, retries): """Called at an interval until the node's power is off.""" state[0] = _power_status(driver_info) if state[0] == states.POWER_OFF: raise loopingcall.LoopingCallDone() if retries[0] > CONF.ipmi.retry_timeout: LOG.error(_('IPMI power off timed out after %(tries)s retries.'), {'tries': retries[0]}) state[0] = states.ERROR raise loopingcall.LoopingCallDone() try: # only issue "power off" once if retries[0] == 0: _exec_ipmitool(driver_info, "power off") retries[0] += 1 except Exception: # Log failures but keep trying LOG.warning( _("IPMI power off failed for node %s.") % driver_info['uuid'])
def _wait_for_download():
    if not os.path.exists(lock_file):
        raise loopingcall.LoopingCallDone()
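
# NOTE: most of the fixed-interval callbacks above mutate single-element
# lists (state[0], retries[0]) and leave the timing to the caller, which
# drives them with oslo.service's FixedIntervalLoopingCall. A minimal,
# self-contained sketch of that driving pattern follows; _example_poll,
# the 1.0-second interval and the retry limit of 3 are illustrative
# assumptions, not values taken from any of the drivers above.
from oslo_service import loopingcall


def _example_poll(state, retries):
    # hypothetical stand-in for _get_power_status()/_power_status()
    retries[0] += 1
    if retries[0] >= 3:           # pretend the node reached the target state
        state[0] = 'power off'
        raise loopingcall.LoopingCallDone()


state, retries = [None], [0]
timer = loopingcall.FixedIntervalLoopingCall(_example_poll, state, retries)
timer.start(interval=1.0).wait()  # poll every second until LoopingCallDone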