def update_pxeconfig(self):
    """Rewrite this device's PXE config so the next boot uses the test kernel/rootfs.

    Copies the current pxeconfig to a local temporary file, edits boot entries
    in place with fileinput, then copies the edited file back over the original.
    Raises JobError if any step of the copy/edit fails.
    """
    # When a kernel is supplied without a rootfs_path, keep whatever root=
    # device the original config used (cut_point stays -1 below, preserving
    # the whole original line).
    keep_orignal_rootfs_dev = False
    boot_cmds = ' ip=dhcp selinux=0 enforcing=0'
    pxeconfig_filename = self.utils.target_pxeconfig_filename(
        barcode=self.device_barcode)
    self.logger.info("update_pxeconfig, pxeconfig_filename : %s",
                     str(pxeconfig_filename))
    default_bootargs = 'ipv6.disable=1'
    if 'kernel' in self.parameters:
        if 'rootfs_path' not in self.parameters:
            keep_orignal_rootfs_dev = True
        else:
            self.rootfs_path = self.parameters.get('rootfs_path')
            boot_cmds += ' root=' + self.rootfs_path + ' rw'
    # Job-supplied bootargs override the defaults entirely.
    if 'bootargs' in self.parameters:
        boot_cmds += ' ' + self.parameters.get('bootargs')
    else:
        boot_cmds += ' ' + default_bootargs
    try:
        shutil.copy(pxeconfig_filename, 'gpxe_tmp.conf')
        for line in fileinput.input('gpxe_tmp.conf', inplace=True):
            # The commas at the end of these lines are intentional
            # avoids adding additional newlines in the file
            # NOTE(review): this comment looks like a Python 2 "print line,"
            # leftover; with Python 3 end=" " a space is emitted after each
            # (already newline-terminated) line — confirm the trailing space
            # is harmless in the generated config.
            if line.find('Golden - LAVA') != -1:
                print(line.replace('Golden - LAVA', 'Testrun - LAVA'), end=" ")
            elif line.find('+ initramfs') != -1:
                # NOTE(review): self.rootfs_path may be unset here when
                # keep_orignal_rootfs_dev is True; an AttributeError would be
                # swallowed by the bare except below — verify.
                print(line.replace('+ initramfs',
                                   'rootfs on %s' % self.rootfs_path), end=" ")
            elif line.find('kernel') == 0 and line.find('tftp') != -1:
                # Truncate the entry at its 'ip=' argument and append the new
                # boot arguments; cut_point of -1 keeps the original root= too.
                cut_point = line.find('ip=')
                if keep_orignal_rootfs_dev:
                    cut_point = -1
                line = line[0:cut_point] + boot_cmds
                self.logger.info("update_pxeconfig, line : %s", str(line))
                print(line, end=" ")
            elif line.find('append') == 0:
                cut_point = line.find('ip=')
                if keep_orignal_rootfs_dev:
                    cut_point = -1
                line = line[0:cut_point] + boot_cmds
                self.logger.info("update_pxeconfig, line : %s", str(line))
                print(line, end=" ")
            elif line.find('Append') == 0:
                cut_point = line.find('ip=')
                if keep_orignal_rootfs_dev:
                    cut_point = -1
                line = line[0:cut_point] + boot_cmds
                self.logger.info("update_pxeconfig, line : %s", str(line))
                print(line, end=" ")
            else:
                print(line, end=" ")
        shutil.copy('gpxe_tmp.conf', pxeconfig_filename)
    # NOTE(review): bare except hides the real failure (including
    # KeyboardInterrupt) behind a generic JobError — consider narrowing to
    # OSError and chaining the cause.
    except:
        raise JobError("Deployment failed, miss pxeconfig")
def run(self, connection, max_end_time):
    """Write the downloaded image to the USB mass storage device with dd.

    Raises JobError if the dd command reports failure; sends Ctrl-C on the
    connection afterwards and returns it.
    """
    connection = super().run(connection, max_end_time)
    # Path of the image fetched earlier by the download action.
    image_file = self.get_namespace_data(
        action='download-action', label='image', key='file')
    dd_command = 'dd if={} of={} bs=1M oflag=sync conv=fsync'.format(
        image_file, self.usb_mass_device)
    if not self.run_command(dd_command.split(' '), allow_silent=True):
        raise JobError("writing to the USB mass storage device failed")
    # Interrupt whatever is currently running on the serial console.
    connection.sendcontrol('c')
    return connection
def run(self, connection, max_end_time):
    """Execute each PyOCD command in sequence.

    Raises JobError on the first failing command; publishes the connection
    into the shared namespace and returns it.
    """
    connection = super().run(connection, max_end_time)
    for pyocd_command in self.exec_list:
        pyocd = ' '.join(pyocd_command)
        self.logger.info("PyOCD command: %s", pyocd)
        if not self.run_command(pyocd.split(' ')):
            # Fix: report the readable command string instead of the
            # split-list repr ("['pyocd', ...] command failed").
            raise JobError("%s command failed" % pyocd)
    self.set_namespace_data(action='shared', label='shared',
                            key='connection', value=connection)
    return connection
def run(self, connection, max_end_time, args=None):
    """Kill the LXC container created by lxc-create-action.

    Raises JobError when lxc-stop produces any output (treated as failure).
    """
    connection = super(LxcStopAction, self).run(connection, max_end_time, args)
    lxc_name = self.get_namespace_data(action='lxc-create-action',
                                       label='lxc', key='name')
    lxc_cmd = ['lxc-stop', '-k', '-n', lxc_name]
    command_output = self.run_command(lxc_cmd)
    # Fix: the original tested "command_output is not ''" — an identity
    # comparison against a literal that is True for any distinct string
    # object (and a SyntaxWarning on Python 3.8+). Any truthy output means
    # the stop failed.
    if command_output:
        raise JobError("Unable to stop lxc container: %s" % command_output)
    # FIXME: JobError needs a unit test
    return connection
def run(self, connection, max_end_time, args=None):
    """Replace the recovery image on the Versatile Express USB mass storage.

    Erases the mounted firmware partition and copies in the freshly
    extracted recovery image. Raises InfrastructureError when the mount
    point or the extracted image directory is missing, JobError when the
    erase/copy fails.
    """
    connection = super(DeployVExpressRecoveryImage, self).run(
        connection, max_end_time, args)
    mount_point = self.get_namespace_data(action='mount-vexpress-usbmsd',
                                          label='vexpress-fw',
                                          key='mount-point')
    # Fix: os.path.realpath() does not raise OSError for a missing path, so
    # the original try/except validated nothing. Check existence explicitly.
    if not os.path.exists(mount_point):
        raise InfrastructureError("Unable to locate mount point: %s"
                                  % mount_point)
    src_dir = self.get_namespace_data(
        action='extract-vexpress-recovery-image',
        label='file', key='recovery_image')
    if not os.path.exists(src_dir):
        raise InfrastructureError(
            "Unable to locate recovery image source directory: %s" % src_dir)
    self.logger.debug(
        "Removing existing recovery image from Versatile Express mass storage device.."
    )
    try:
        remove_directory_contents(mount_point)
    except Exception as exc:
        # Chain the cause so the underlying OS error is not lost.
        raise JobError("Failed to erase old recovery image") from exc
    self.logger.debug(
        "Transferring new recovery image to Versatile Express mass storage device.."
    )
    try:
        copy_directory_contents(src_dir, mount_point)
    except Exception as exc:
        raise JobError("Failed to deploy recovery image to %s"
                       % mount_point) from exc
    return connection
def collate(self, reply, params):
    """
    Retrieve values from reply to the call for this action
    possibly multiple key:value pairs.
    Arguments:
        reply - self.get_namespace_data(action=protocol.name, label=protocol.name, key=self.name)
        params - dict containing the message to match to the reply
    params will not be modified, the return value is a *tuple*
    where the first value is the identifier to be used by other actions
    wanting this data (typically the API call or messageID) and the second value
    is the collated data from the call to the protocol.
    """
    retval = {}
    # An empty or non-dict reply cannot contain anything to collate.
    if reply == {} or not isinstance(reply, dict):
        msg = "Unable to identify replaceable values in the parameters: %s" % params
        self.logger.error(msg)
        raise JobError(msg)
    self.logger.debug({
        "Retrieving replaceable values from": "%s" % json.dumps(reply),
        "params": "%s" % json.dumps(params)})
    if 'message' in params and reply:
        # Keys in the outgoing message whose value is a '$placeholder'
        # (yaml_line is dispatcher metadata, not a real field).
        replaceables = [key for key, value in params['message'].items()
                        if key != 'yaml_line' and value.startswith('$')]
        for item in replaceables:
            if 'message' in reply:
                # Prefer the entry belonging to this job.
                # NOTE(review): "self.job_id in val" tests membership in the
                # (key, value) tuple — presumably matching the key; confirm.
                target_list = [val for val in reply['message'].items()
                               if self.job_id in val]
            else:
                target_list = [val for val in list(reply.items())]
            data = target_list[0][1]
            if item not in data:
                self.logger.warning("Skipping %s - not found in %s", item, data)
                continue
            # NOTE(review): setdefault records only the FIRST replaceable
            # item per messageID; subsequent items are silently dropped —
            # verify this is intended.
            retval.setdefault(params['messageID'], {item: data[item]})
    if 'messageID' in params:
        ret_key = params['messageID']
        if ret_key in retval:
            ret_value = retval[ret_key]
            return ret_key, ret_value
    # Nothing was collated: treat as a job configuration error.
    msg = "Unable to identify replaceable values in the parameters: %s" % params
    self.logger.error(msg)
    raise JobError(msg)
def wait(self, max_end_time=None):
    """
    Simple wait without sending blank lines, as that causes the menu
    to advance without data which can cause blank entries and can cause
    the menu to exit to an unrecognised prompt.

    Raises JobError when the prompt does not appear within the timeout.
    """
    # max_end_time is accepted for interface compatibility but the wait is
    # bounded by the action's own timeout duration.
    try:
        self.raw_connection.expect(self.prompt_str,
                                   timeout=self.timeout.duration)
    except pexpect.TIMEOUT:
        raise JobError("wait for prompt timed out")
def _wait_on_create(self, friendly_name): if not self.configured: return False wait_msg = { 'request': 'lava_wait', 'timeout': VLAND_DEPLOY_TIMEOUT, 'messageID': friendly_name, } ret = self.multinode_protocol(wait_msg) if ret: values = list(ret.values())[0] return (values['vlan_name'], values['vlan_tag'],) raise JobError("Waiting for vlan creation failed: %s" % ret)
def run(self, connection, max_end_time):
    """Install the job's requested packages inside the LXC container.

    Runs apt-get non-interactively via lxc-attach; raises JobError when
    the installation fails.
    """
    connection = super().run(connection, max_end_time)
    lxc_name = self.get_namespace_data(action='lxc-create-action',
                                       label='lxc', key='name')
    packages = self.parameters['packages']
    install_cmd = [
        'lxc-attach', '-v', 'DEBIAN_FRONTEND=noninteractive',
        '-n', lxc_name, '--', 'apt-get', '-y', '-q', 'install',
    ]
    install_cmd.extend(packages)
    if not self.run_command(install_cmd):
        raise JobError("Unable to install using apt-get in lxc container")
    return connection
def run(self, connection, max_end_time):
    """Create the LXC container described by self.lxc_data.

    Builds the lxc-create command (mirror/arch options depend on the
    template), tolerates an already-existing persistent container, and
    symlinks custom-path containers into the default LXC_PATH.
    Raises JobError when container creation fails.
    """
    connection = super().run(connection, max_end_time)
    verbose = '' if self.lxc_data['verbose'] else '-q'
    lxc_default_path = lxc_path(self.job.parameters['dispatcher'])
    if self.lxc_data['custom_lxc_path']:
        lxc_create = ['lxc-create', '-P', lxc_default_path]
    else:
        lxc_create = ['lxc-create']
    if self.lxc_data['lxc_template'] in LXC_TEMPLATE_WITH_MIRROR:
        lxc_cmd = lxc_create + [
            verbose, '-t', self.lxc_data['lxc_template'],
            '-n', self.lxc_data['lxc_name'], '--',
            '--release', self.lxc_data['lxc_release']
        ]
        if self.lxc_data['lxc_mirror']:
            lxc_cmd += ['--mirror', self.lxc_data['lxc_mirror']]
        if self.lxc_data['lxc_security_mirror']:
            lxc_cmd += [
                '--security-mirror', self.lxc_data['lxc_security_mirror']
            ]
        # FIXME: Should be removed when LAVA's supported distro is bumped
        # to Debian Stretch or any distro that supports systemd
        lxc_cmd += ['--packages', LXC_DEFAULT_PACKAGES]
    else:
        lxc_cmd = lxc_create + [
            verbose, '-t', self.lxc_data['lxc_template'],
            '-n', self.lxc_data['lxc_name'], '--',
            '--dist', self.lxc_data['lxc_distribution'],
            '--release', self.lxc_data['lxc_release']
        ]
        if self.lxc_data['lxc_arch']:
            lxc_cmd += ['--arch', self.lxc_data['lxc_arch']]
    # FIXME: check if persistent name already exists and then drop allow_fail & allow_silent
    cmd_out = self.run_command(lxc_cmd, allow_fail=True, allow_silent=True)
    if isinstance(cmd_out, str):
        if 'exists' in cmd_out and self.lxc_data['lxc_persist']:
            # Fix: corrected the "Persistant" typo in the log message.
            self.logger.debug('Persistent container exists')
            self.results = {'status': self.lxc_data['lxc_name']}
    elif not cmd_out:
        raise JobError("Unable to create lxc container")
    else:
        self.logger.debug('Container created successfully')
        self.results = {'status': self.lxc_data['lxc_name']}
    # Create symlink in default container path ie., /var/lib/lxc defined by
    # LXC_PATH so that we need not add '-P' option to every lxc-* command.
    dst = os.path.join(LXC_PATH, self.lxc_data['lxc_name'])
    if self.lxc_data['custom_lxc_path'] and not os.path.exists(dst):
        os.symlink(
            os.path.join(lxc_default_path, self.lxc_data['lxc_name']), dst)
    return connection
def version(self, binary, command):
    """
    Returns a string with the version of the JLink binary, board's hardware
    and firmware.

    Runs the given command (a J-Link Commander invocation), parses its
    output for host, firmware and hardware version strings, and raises
    JobError when the tool cannot be executed or produces no output.
    """
    # if binary is not absolute, fail.
    msg = "Unable to retrieve version of %s" % binary
    try:
        # Script file instructing J-Link Commander to print firmware info
        # and exit.
        with open(self.fname, "w") as f:
            f.write("f\nexit")
        cmd_output = subprocess.check_output(command)
        if not cmd_output:
            # Fix: the original raised JobError(cmd_output) here, i.e. with
            # an empty bytes object as the message; use the meaningful one.
            raise JobError(msg)
    except (subprocess.CalledProcessError, FileNotFoundError) as exc:
        # Fix: chain the underlying error for easier debugging.
        raise JobError(msg) from exc
    output_str = cmd_output.decode("utf-8")
    host_ver = "none"
    fw_ver = "none"
    hw_ver = "none"
    temp = re.search("J-Link Commander (.+?) \\(Compiled", output_str)
    if temp:
        host_ver = temp.group(1)
    temp = re.search("Firmware: (.+?) compiled", output_str)
    if temp:
        fw_ver = temp.group(1)
    temp = re.search("Hardware version: (.+?)\n", output_str)
    if temp:
        hw_ver = temp.group(1)
    return "%s, SEGGER J-Link Commander %s, Firmware %s, Hardware %s" % (
        binary,
        host_ver,
        fw_ver,
        hw_ver,
    )
def run(self, connection, max_end_time):
    """Run each PyOCD command, reusing and republishing the shared connection.

    Raises JobError on the first failing command.
    """
    connection = self.get_namespace_data(
        action="shared", label="shared", key="connection", deepcopy=False
    )
    connection = super().run(connection, max_end_time)
    for pyocd_command in self.exec_list:
        pyocd = " ".join(pyocd_command)
        self.logger.info("PyOCD command: %s", pyocd)
        if not self.run_command(pyocd.split(" ")):
            # Fix: report the readable command string instead of the
            # split-list repr (consistent with the other pyocd action).
            raise JobError("%s command failed" % pyocd)
    self.set_namespace_data(
        action="shared", label="shared", key="connection", value=connection
    )
    return connection
def run(self, connection, max_end_time, args=None):
    """
    qemu needs help to reboot after running the debian installer
    and typically the boot is quiet, so there is almost nothing to log.
    """
    # Attach the installed base image as the primary raw drive.
    base_image = self.get_namespace_data(action='prepare-empty-image',
                                         label='prepare-empty-image',
                                         key='output')
    self.sub_command.append('-drive format=raw,file=%s' % base_image)
    guest = self.get_namespace_data(action='apply-overlay-guest',
                                    label='guest', key='filename')
    if guest:
        self.logger.info("Extending command line for qcow2 test overlay")
        self.sub_command.append('-drive format=qcow2,file=%s,media=disk'
                                % (os.path.realpath(guest)))
        # push the mount operation to the test shell pre-command to be run
        # before the test shell tries to execute.
        shell_precommand_list = []
        mountpoint = self.get_namespace_data(action='test', label='results',
                                             key='lava_test_results_dir')
        shell_precommand_list.append('mkdir %s' % mountpoint)
        # The overlay filesystem is labelled LAVA; mount it by label.
        shell_precommand_list.append('mount -L LAVA %s' % mountpoint)
        self.set_namespace_data(action='test', label='lava-test-shell',
                                key='pre-command-list',
                                value=shell_precommand_list)
    self.logger.info("Boot command: %s", ' '.join(self.sub_command))
    shell = ShellCommand(' '.join(self.sub_command), self.timeout,
                         logger=self.logger)
    # NOTE(review): exitstatus is read immediately after spawning —
    # presumably it is only non-None/non-zero if the process failed to
    # start; confirm against ShellCommand's semantics.
    if shell.exitstatus:
        raise JobError(
            "%s command exited %d: %s" %
            (self.sub_command, shell.exitstatus, shell.readlines()))
    self.logger.debug("started a shell command")
    shell_connection = ShellSession(self.job, shell)
    shell_connection = super(IsoRebootAction, self).run(shell_connection,
                                                        max_end_time, args)
    # Wait for the (quiet) installer boot to reach its known message before
    # handing the connection on.
    shell_connection.prompt_str = [INSTALLER_QUIET_MSG]
    self.wait(shell_connection)
    self.set_namespace_data(action='shared', label='shared',
                            key='connection', value=shell_connection)
    return shell_connection
def _lookup_switch_id(self, switch_name): msg = { 'type': 'db_query', 'command': 'db.get_switch_id_by_name', 'data': { 'name': switch_name } } self.logger.debug({"lookup_switch": msg}) response = self._call_vland(msg) if not response or response == '': raise JobError("Switch_id for switch name: %s not found" % switch_name) reply = json.loads(response) return reply['data']
def handle_testcase(params):
    """Parse ``key=value`` signal parameters into a dict.

    Keys are lowercased; values are kept verbatim. Raises JobError for any
    parameter lacking an ``=`` separator.
    """
    # FIXME: move to utils
    data = {}
    for param in params:
        # Fix: split on the first '=' only, so values may themselves contain
        # '=' (the original split on every '=' and rejected such parameters).
        parts = param.split('=', 1)
        if len(parts) == 2:
            key, value = parts
            key = key.lower()
            data[key] = value
        else:
            raise JobError(
                "Ignoring malformed parameter for signal: \"%s\". " % param)
    return data
def copy_overlay_to_sparse_fs(image, overlay):
    """Unpack an overlay tarball into an Android sparse ext4 image.

    Converts the sparse image to raw ext4, mounts it with libguestfs,
    extracts the (gzip-compressed) overlay into it, verifies free space,
    and converts the result back to sparse format.
    Raises JobError for a non-sparse image or a full filesystem,
    InfrastructureError when guestfs cannot be prepared.
    """
    ext4_img = image + '.ext4'
    logger = logging.getLogger('dispatcher')
    guest = guestfs.GuestFS(python_return_dict=True)
    # Check if the given image is an Android sparse image
    if not is_sparse_image(image):
        raise JobError("Image is not an Android sparse image: %s" % image)
    subprocess.check_output(['/usr/bin/simg2img', image, ext4_img],
                            stderr=subprocess.STDOUT)
    guest.add_drive(ext4_img)
    guest.launch()
    devices = guest.list_devices()
    if not devices:
        raise InfrastructureError("Unable to prepare guestfs")
    guest.mount(devices[0], '/')
    # FIXME: max message length issues when using tar_in
    # on tar.gz. Works fine with tar so decompressing
    # overlay first.
    if os.path.exists(overlay[:-3]):
        os.unlink(overlay[:-3])
    decompressed_overlay = decompress_file(overlay, 'gz')
    guest.tar_in(decompressed_overlay, '/')
    # Check if we have space left on the mounted image.
    output = guest.df()
    logger.debug(output)
    device, size, used, available, percent, mountpoint = output.split(
        "\n")[1].split()
    guest.umount(devices[0])
    # Fix: the original used "int(available) is 0" — an identity comparison
    # with an int literal whose result is implementation-defined. Use ==.
    if int(available) == 0 or percent == '100%':
        raise JobError("No space in image after applying overlay: %s" % image)
    subprocess.check_output(['/usr/bin/img2simg', ext4_img, image],
                            stderr=subprocess.STDOUT)
    os.remove(ext4_img)
def run(self, connection, max_end_time, args=None):  # pylint: disable=too-many-locals
    """Flash one partition with fastboot from inside the LXC container.

    Copies the downloaded file into the container, optionally waits for a
    reboot prompt, then runs ``fastboot flash``. Raises JobError when no
    LXC container is available, InfrastructureError when flashing fails.
    """
    connection = super(FastbootFlashAction, self).run(connection,
                                                      max_end_time, args)
    # this is the device namespace - the lxc namespace is not accessible
    lxc_name = None
    # Fix: the original indexed [0] into the filtered list, which raised a
    # bare IndexError when no LXC protocol was present and made the
    # "if protocol" guard unreachable. Use next() with a default so the
    # failure surfaces as the intended JobError below.
    protocol = next(
        (protocol for protocol in self.job.protocols
         if protocol.name == LxcProtocol.name), None)
    if protocol:
        lxc_name = protocol.lxc_name
    if not lxc_name:
        raise JobError("Unable to use fastboot")
    src = self.get_namespace_data(action='download-action',
                                  label=self.command, key='file')
    if not src:
        return connection
    dst = copy_to_lxc(lxc_name, src, self.job.parameters['dispatcher'])
    sequence = self.job.device['actions']['boot']['methods'].get(
        'fastboot', [])
    if 'no-flash-boot' in sequence and self.command in ['boot']:
        return connection
    # if a reboot is requested, will need to wait for the prompt
    # if not, continue in the existing mode.
    reboot = self.get_namespace_data(action=self.name,
                                     label='interrupt', key='reboot')
    if self.interrupt_prompt and reboot:
        connection.prompt_str = self.interrupt_prompt
        self.logger.debug("Changing prompt to '%s'", connection.prompt_str)
        self.wait(connection)
    serial_number = self.job.device['fastboot_serial_number']
    fastboot_opts = self.job.device['fastboot_options']
    fastboot_cmd = [
        'lxc-attach', '-n', lxc_name, '--', 'fastboot', '-s', serial_number,
        'flash', self.command, dst
    ] + fastboot_opts
    self.logger.info("Handling %s", self.command)
    command_output = self.run_command(fastboot_cmd)
    if command_output and 'error' in command_output:
        raise InfrastructureError("Unable to flash %s using fastboot: %s" %
                                  (self.command, command_output))
    self.results = {'label': self.command}
    return connection
def run(self, connection, max_end_time, args=None):
    """Establish (or reuse) the serial connection to the device.

    Looks up an existing shared connection — optionally from another
    namespace via 'connection-namespace' — and only spawns the connection
    command when none is found. Publishes the resulting connection into the
    shared namespace and returns it.
    """
    connection_namespace = self.parameters.get('connection-namespace', None)
    parameters = None
    if connection_namespace:
        parameters = {"namespace": connection_namespace}
    else:
        parameters = {
            'namespace': self.parameters.get('namespace', 'common')
        }
    connection = self.get_namespace_data(action='shared', label='shared',
                                         key='connection', deepcopy=False,
                                         parameters=parameters)
    if connection:
        # Reuse: nothing to do, the device is already connected.
        self.logger.debug("Already connected")
        return connection
    elif connection_namespace:
        self.logger.warning(
            "connection_namespace provided but no connection found. "
            "Please ensure that this parameter is correctly set to existing namespace."
        )
    self.logger.info("[%s] %s %s '%s'", parameters['namespace'], self.name,
                     self.message, self.command)
    # ShellCommand executes the connection command
    shell = self.shell_class("%s\n" % self.command, self.timeout,
                             logger=self.logger)
    # NOTE(review): exitstatus is read immediately after spawning —
    # presumably only set when the command failed to start; confirm the
    # shell_class semantics.
    if shell.exitstatus:
        raise JobError("%s command exited %d: %s" %
                       (self.command, shell.exitstatus, shell.readlines()))
    # ShellSession monitors the pexpect
    connection = self.session_class(self.job, shell)
    connection.connected = True
    if self.hardware:
        # Tag the connection with the hardware-specific tags (e.g. serial).
        connection.tags = self.tag_dict[self.hardware]
    connection = super(ConnectDevice, self).run(connection, max_end_time,
                                                args)
    if not connection.prompt_str:
        # Fall back to the device's default shell prompt.
        connection.prompt_str = [
            self.job.device.get_constant('default-shell-prompt')
        ]
    self.set_namespace_data(action='shared', label='shared',
                            key='connection', value=connection)
    return connection
def _lookup_port_id(self, switch_id, port): msg = { 'type': 'db_query', 'command': 'db.get_port_by_switch_and_number', 'data': { 'switch_id': switch_id, 'number': port } } self.logger.debug({"lookup_port_id": msg}) response = self._call_vland(msg) if not response or response == '': raise JobError("Port_id for port: %s not found" % port) reply = json.loads(response) return reply['data']
def signal_test_set(self, params):
    """Open or close a test set in response to a START/STOP signal.

    ``params[0]`` is the action; for START, ``params[1]`` names the set.
    Returns "testset_start"/"testset_stop", or None for other actions.
    Raises JobError when START carries no test set name.
    """
    action = params.pop(0)
    if action == "START":
        try:
            self.testset_name = params[0]
        except IndexError:
            raise JobError("Test set declared without a name")
        self.logger.info("Starting test_set %s", self.testset_name)
        return "testset_start"
    if action == "STOP":
        self.logger.info("Closing test_set %s", self.testset_name)
        self.testset_name = None
        return "testset_stop"
    return None
def untar_file(infile, outdir, member=None, outfile=None):
    """Unpack a tar archive.

    With no ``member``, extracts the whole archive into ``outdir``.
    With ``member``, extracts just that entry and writes its contents to
    ``outfile``. Raises JobError when the archive cannot be unpacked.
    """
    try:
        # Fix: use context managers so the archive and the output file are
        # closed even when extraction fails (the original leaked both on
        # any error path).
        with tarfile.open(infile) as tar:
            if member:
                file_obj = tar.extractfile(member)
                # Fix: extractfile() returns None for non-regular members;
                # the original then crashed with AttributeError instead of
                # reporting a JobError.
                if file_obj is None:
                    raise tarfile.TarError(
                        "'%s' is not a regular file" % member)
                with file_obj, open(outfile, 'wb') as target:
                    target.write(file_obj.read())
            else:
                tar.extractall(outdir)
    except tarfile.TarError as exc:
        raise JobError("Unable to unpack %s: %s" % (infile, str(exc)))
def signal_test_case(self, params):
    """Record one TESTCASE signal as a result.

    Parses the key=value params, matches them against the test pattern,
    stores the result in the namespace data (guarding against duplicate
    test_case_ids) and emits it via the results logger.
    """
    try:
        data = handle_testcase(params)
        # get the fixup from the pattern_dict
        res = self.signal_match.match(data,
                                      fixupdict=self.pattern.fixupdict())
    except (JobError, TestError) as exc:
        self.logger.error(str(exc))
        return True
    p_res = self.get_namespace_data(action='test',
                                    label=self.signal_director.test_uuid,
                                    key='results')
    if not p_res:
        p_res = OrderedDict()
        self.set_namespace_data(
            action='test', label=self.signal_director.test_uuid,
            key='results', value=p_res)
    # prevent losing data in the update
    # FIXME: support parameters and retries
    if res["test_case_id"] in p_res:
        # Fix: the format argument was passed as a second exception
        # argument instead of being %-interpolated into the message.
        raise JobError("Duplicate test_case_id in results: %s"
                       % res["test_case_id"])
    # turn the result dict inside out to get the unique
    # test_case_id/testset_name as key and result as value
    res_data = {
        'definition': self.definition,
        'case': res["test_case_id"],
        'result': res["result"]
    }
    # check for measurements
    if 'measurement' in res:
        try:
            measurement = decimal.Decimal(res['measurement'])
        except decimal.InvalidOperation:
            # Fix: same %-formatting defect as above.
            raise TestError("Invalid measurement %s" % res['measurement'])
        res_data['measurement'] = measurement
        if 'units' in res:
            res_data['units'] = res['units']
    if self.testset_name:
        res_data['set'] = self.testset_name
        self.report[res['test_case_id']] = {
            'set': self.testset_name,
            'result': res['result']
        }
    else:
        self.report[res['test_case_id']] = res['result']
    # Send the results back
    self.logger.results(res_data)  # pylint: disable=no-member
def run(self, connection, max_end_time):
    """Record the ssh host address for secondary (non-primary) connections.

    Reads the host data published via the multinode protocol and stores
    the address under this action's 'ssh-connection' label. Raises
    JobError when the host data is missing (no ssh deploy ran).
    """
    connection = super().run(connection, max_end_time)
    if self.primary:
        # The primary connection needs no host lookup.
        return connection
    host_id = self.parameters['parameters']['hostID']
    host_data = self.get_namespace_data(
        action=MultinodeProtocol.name, label=MultinodeProtocol.name,
        key=host_id)
    if not host_data:
        raise JobError("Unable to retrieve %s - missing ssh deploy?"
                       % host_id)
    host_key = self.parameters['parameters']['host_key']
    self.set_namespace_data(
        action=self.name, label='ssh-connection', key='host_address',
        value=host_data[host_key])
    return connection
def deploy_check(cls, device, parameters):
    """Validate device configuration and deploy parameters.

    Raises JobError when the device is missing, ConfigurationError for
    any structural problem; returns None when everything is present.
    """
    if not device:
        raise JobError('job "device" was None')
    if 'actions' not in device:
        raise ConfigurationError(
            'Invalid device configuration, no "actions" in device configuration'
        )
    if 'to' not in parameters:
        raise ConfigurationError('"to" not specified in deploy parameters')
    actions = device['actions']
    if 'deploy' not in actions:
        raise ConfigurationError(
            '"deploy" is not in the device configuration actions')
    if 'methods' not in actions['deploy']:
        raise ConfigurationError(
            'Device misconfiguration, no "methods" in device configuration deploy actions'
        )
def boot_check(cls, device, parameters):
    """Validate device configuration and boot parameters.

    Raises JobError when the device is missing, ConfigurationError for
    any structural problem; returns None when everything is present.
    """
    if not device:
        raise JobError('job "device" was None')
    if 'method' not in parameters:
        raise ConfigurationError("method not specified in boot parameters")
    if 'actions' not in device:
        raise ConfigurationError(
            'Invalid device configuration, no "actions" in device configuration'
        )
    actions = device['actions']
    if 'boot' not in actions:
        raise ConfigurationError(
            '"boot" is not in the device configuration actions')
    if 'methods' not in actions['boot']:
        raise ConfigurationError(
            'Device misconfiguration, no "methods" in device configuration boot action'
        )
def wait(self, max_end_time=None):
    """Wait for the prompt without sending blank lines.

    Sending blank lines would advance the menu without data, producing
    blank entries or dropping to an unrecognised prompt. The wait is
    bounded by max_end_time when given, otherwise by the action timeout.
    Returns the pexpect match index; raises JobError on timeout and
    LAVABug when the remaining time is already negative.
    """
    timeout = (max_end_time - time.time()) if max_end_time \
        else self.timeout.duration
    if timeout < 0:
        raise LAVABug("Invalid max_end_time value passed to wait()")
    try:
        return self.raw_connection.expect(self.prompt_str, timeout=timeout)
    except (TestError, pexpect.TIMEOUT):
        raise JobError("wait for prompt timed out")
def run(self, connection, max_end_time, args=None):
    """Check every file staged for TFTP against the dispatcher size limit.

    Raises JobError when any staged ramdisk/kernel/dtb exceeds the limit.
    """
    super(TftpAction, self).run(connection, max_end_time, args)
    tftp_size_limit = self.job.parameters['dispatcher'].get(
        'tftp_size_limit', TFTP_SIZE_LIMIT)
    self.logger.debug("Checking files for TFTP limit of %s bytes.",
                      tftp_size_limit)
    staged = [('compress-ramdisk', 'ramdisk'),
              ('download-action', 'kernel'),
              ('download-action', 'dtb')]
    for action, key in staged:
        if key not in self.parameters:
            continue
        filename = self.get_namespace_data(action=action, label='file',
                                           key=key)
        filename = os.path.join(tftpd_dir(), filename)
        fsize = os.stat(filename).st_size
        if fsize >= tftp_size_limit:
            raise JobError(
                "Unable to send '%s' over tftp: file too large (%d > %d)"
                % (os.path.basename(filename), fsize, tftp_size_limit))
    return connection
def run(self, connection, max_end_time, args=None):
    """Issue the job's soft reboot command(s) and wait for shutdown.

    Falls back to the stock reboot command list when the job defines
    none. Raises JobError when the shutdown message never appears.
    """
    connection = super(SendRebootCommands, self).run(connection,
                                                     max_end_time, args)
    reboot_commands = self.parameters.get('soft_reboot', [])
    if not self.parameters.get('soft_reboot', None):
        # No job-defined command; warn and use the defaults (kept separate
        # from the line above for unit-test coverage).
        self.logger.warning(
            'No soft reboot command defined in the test job. Using defaults.')
        reboot_commands = REBOOT_COMMAND_LIST
    shutdown_prompt = self.parameters.get('parameters', {}).get(
        'shutdown-message', self.job.device.get_constant('shutdown-message'))
    connection.prompt_str = shutdown_prompt
    connection.timeout = self.connection_timeout
    for cmd in reboot_commands:
        connection.sendline(cmd)
    try:
        self.wait(connection)
    except TestError:
        raise JobError("Soft reboot failed.")
    self.results = {'commands': reboot_commands}
    return connection
def run(self, connection, max_end_time):  # pylint: disable=too-many-locals
    """
    Retrieve the decompressed image from the dispatcher by calling the tool
    specified by the test writer, from within the test image of the first
    deployment, using the device to write directly to the secondary media,
    without needing to cache on the device.
    """
    connection = super().run(connection, max_end_time)
    self.get_device_info()
    # deploy_image() must return exactly True to count as success.
    if self.deploy_image() is not True:
        self.logger.error("Deploy master image failed.")
        raise JobError('kernel or rootfs file not exist')
    if 'rootfs_path' in self.parameters:
        # A rootfs was deployed, so boot actions need to be scheduled.
        self.add_boot_actions()
    return connection
def run(self, connection, max_end_time):
    """Run the internal pipeline with retry semantics.

    Without 'repeat' in the parameters, returns on the first successful
    attempt (failure_retry). With 'repeat', always runs max_retries times
    and raises JobError at the end if any attempt failed. LAVABug is not
    caught, as it indicates a bug in LAVA itself.
    """
    retries = 0
    has_failed = False
    self.call_protocols()
    while retries < self.max_retries:
        try:
            connection = self.internal_pipeline.run_actions(
                connection, max_end_time, args)
            if 'repeat' not in self.parameters:
                # failure_retry returns on first success. repeat returns only at max_retries.
                return connection
        # Do not retry for LAVABug (as it's a bug in LAVA)
        except (InfrastructureError, JobError, TestError) as exc:
            has_failed = True
            # Restart max_end_time or the retry on a timeout fails with duration < 0
            max_end_time = self.timeout.duration + time.time()
            self.timeout.start = time.time()
            # Print the error message
            retries += 1
            msg = "%s failed: %d of %d attempts. '%s'" % (
                self.name, retries, self.max_retries, exc)
            self.logger.error(msg)
            # Cleanup the action to allow for a safe restart
            self.cleanup(connection)
            # re-raise if this is the last loop
            if retries == self.max_retries:
                self.errors = "%s retries failed for %s" % (retries, self.name)
                # Preserve the connection for later actions before re-raising.
                self.set_namespace_data(action='shared', label='shared',
                                        key='connection', value=connection)
                raise
            # Wait some time before retrying
            time.sleep(self.sleep)
            self.logger.warning("Retrying: %s %s (%s sec)", self.level,
                                self.name, max_end_time)
    # If we are repeating, check that all repeat were a success.
    if has_failed:
        # tried and failed
        raise JobError("%s retries failed for %s" % (retries, self.name))
    return connection