def write_summary(self, data):
    """Write the summary worksheet: outcome counts, rates and a pie chart.

    Fixes two defects of the previous revision:

    * the "failed"/"skipped" ``ngettext`` templates were never
      ``.format()``-ed, so literal ``{}`` placeholders leaked into the
      spreadsheet;
    * a session with zero results raised ``ZeroDivisionError`` while
      computing the rates.
    """
    self.worksheet2.set_column(0, 0, 5)
    self.worksheet2.set_column(1, 1, 2)
    self.worksheet2.set_column(3, 3, 27)
    self.worksheet2.write(3, 1, _('Failures summary'), self.format03)
    # Guard against a session with no results at all.
    divisor = self.total if self.total else 1
    self.worksheet2.write(4, 1, '✔', self.format10)
    self.worksheet2.write(
        4, 2,
        (
            ngettext('{} Test passed', '{} Tests passed',
                     self.total_pass).format(self.total_pass)
            + " - "
            + _('Success Rate: {:.2f}% ({}/{})').format(
                self.total_pass / divisor * 100,
                self.total_pass, self.total)
        ), self.format02)
    self.worksheet2.write(5, 1, '✘', self.format11)
    self.worksheet2.write(
        5, 2,
        (
            # BUG FIX: the template was concatenated without .format(),
            # printing a literal "{} Tests failed" in the sheet.
            ngettext('{} Test failed', '{} Tests failed',
                     self.total_fail).format(self.total_fail)
            + ' - '
            + _('Failure Rate: {:.2f}% ({}/{})').format(
                self.total_fail / divisor * 100,
                self.total_fail, self.total)
        ), self.format02)
    self.worksheet2.write(6, 1, '-', self.format12)
    self.worksheet2.write(
        6, 2,
        (
            # BUG FIX: same missing .format() as the "failed" row.
            ngettext('{} Test skipped', '{} Tests skipped',
                     self.total_skip).format(self.total_skip)
            + ' - '
            + _('Skip Rate: {:.2f}% ({}/{})').format(
                self.total_skip / divisor * 100,
                self.total_skip, self.total)
        ), self.format02)
    # Chart source data: labels in L3:L5, counts in M3:M5.
    self.worksheet2.write_column(
        'L3', [_('Fail'), _('Skip'), _('Pass')], self.format14)
    self.worksheet2.write_column(
        'M3', [self.total_fail, self.total_skip, self.total_pass],
        self.format14)
    # Configure the series.
    chart = self.workbook.add_chart({'type': 'pie'})
    chart.set_legend({'position': 'none'})
    chart.add_series({
        'points': [
            {'fill': {'color': 'red'}},
            {'fill': {'color': 'gray'}},
            {'fill': {'color': 'lime'}},
        ],
        'categories': '=' + _("Summary") + '!$L$3:$L$5',
        'values': '=' + _("Summary") + '!$M$3:$M$5',
    })
    # Insert the chart into the worksheet.
    self.worksheet2.insert_chart('F4', chart, {
        'x_offset': 0, 'y_offset': 10, 'x_scale': 0.25, 'y_scale': 0.25
    })
def check_in_context(self, parent, unit, field, context):
    """Check that the value of *field* on *unit* is unique within *context*.

    Yields a :attr:`Problem.not_unique` error (via ``parent.error``)
    listing the clashing units when more than one unit shares the same
    field value.
    """
    value_map = context.compute_shared(
        "field_value_map[{}]".format(field),
        compute_value_map, context, field)
    value = getattr(unit, field2prop(field))
    units_with_this_value = value_map[value]
    n = len(units_with_this_value)
    if n > 1:
        # Put this unit first and keep the remaining units in their
        # original order. (Same result as the previous index()-based
        # sorted() call, without the O(n^2) key function.)
        unit_list = [unit] + [
            other for other in units_with_this_value if other is not unit]
        yield parent.error(
            unit_list, field, Problem.not_unique,
            ngettext(
                "clashes with {0} other unit",
                "clashes with {0} other units", n - 1
            ).format(n - 1) + ', look at: ' + ', '.join(
                # XXX: the relative_to is a hack, ideally we would
                # allow the UI to see the fine structure of the error
                # message and pass appropriate path to relative_to()
                str(other_unit.origin.relative_to(os.getcwd()))
                for other_unit in units_with_this_value
                if other_unit is not unit))
def _load_checkpoint_unix_py32(self):
    """Read and return the raw checkpoint data (UNIX, python 3.2 flavor).

    Returns ``b''`` when no checkpoint file exists yet; any other
    IOError is re-raised to the caller.
    """
    session_path = os.path.join(self._location, self._SESSION_FILE)
    # Hold a descriptor on the session directory for the whole operation.
    dir_fd = os.open(self._location, os.O_DIRECTORY)
    logger.debug(
        _("Opened session directory %r as descriptor %d"),
        self._location, dir_fd)
    try:
        try:
            # Open the session file and stat it so that we know exactly
            # how many bytes to read.
            fd = os.open(session_path, os.O_RDONLY)
            logger.debug(
                _("Opened session state file %r as descriptor %d"),
                session_path, fd)
            stat_result = os.fstat(fd)
            logger.debug(
                # TRANSLATORS: stat is a system call name, don't translate it
                _("Stat'ed session state file: %s"), stat_result)
            try:
                logger.debug(ngettext(
                    "Reading %d byte of session state",
                    "Reading %d bytes of session state",
                    stat_result.st_size), stat_result.st_size)
                data = os.read(fd, stat_result.st_size)
                logger.debug(ngettext(
                    "Read %d byte of session state",
                    "Read %d bytes of session state",
                    len(data)), len(data))
                # A short read means something truncated the file under us.
                if len(data) != stat_result.st_size:
                    raise IOError(_("partial read?"))
            finally:
                logger.debug(_("Closed descriptor %d"), fd)
                os.close(fd)
        except IOError as exc:
            if exc.errno == errno.ENOENT:
                # A session that was never saved reads back as empty.
                return b''
            raise
        else:
            return data
    finally:
        logger.debug(_("Closed descriptor %d"), dir_fd)
        os.close(dir_fd)
def maybe_resume_session(self):
    """Offer to resume one of the incomplete sessions, if any.

    Interactively walks the resume candidates; returns True when an old
    session was successfully resumed, False otherwise (including when a
    fresh session was created instead).
    """
    # Try to use the first session that can be resumed if the user agrees
    resume_storage_list = self.get_resume_candidates()
    resume_storage = None
    resumed = False
    if resume_storage_list:
        print(self.C.header(_("Resume Incomplete Session")))
        print(
            ngettext(
                "There is {0} incomplete session that might be resumed",
                "There are {0} incomplete sessions that might be resumed",
                len(resume_storage_list)).format(len(resume_storage_list)))
        for resume_storage in resume_storage_list:
            # Skip sessions that the user doesn't want to resume
            cmd = self._pick_action_cmd(
                [
                    Action('r', _("resume this session"), 'resume'),
                    Action('n', _("next session"), 'next'),
                    Action('c', _("create new session"), 'create')
                ], _("Do you want to resume session {0!a}?").format(
                    resume_storage.id))
            if cmd == 'resume':
                pass
            elif cmd == 'next':
                continue
            elif cmd == 'create' or cmd is None:
                self.create_manager(None)
                break
            # Skip sessions that cannot be resumed
            try:
                self.create_manager(resume_storage)
            except SessionResumeError:
                cmd = self._pick_action_cmd([
                    Action('i', _("ignore this problem"), 'ignore'),
                    Action('e', _("erase this session"), 'erase')
                ])
                if cmd == 'erase':
                    resume_storage.remove()
                    print(_("Session removed"))
                # Both 'ignore' and 'erase' move on to the next candidate.
                continue
            else:
                resumed = True
                # If we resumed maybe not rerun the same, probably broken job
                if resume_storage is not None:
                    self.handle_last_job_after_resume()
                # Finally ignore other sessions that can be resumed
                break
        else:
            # NOTE(review): reconstructed as a for/else clause — runs only
            # when the loop was never broken out of (no candidate picked);
            # confirm against the upstream indentation.
            if resume_storage is not None and not self.ask_for_new_session():
                # TRANSLATORS: This is the exit message
                raise SystemExit(_("Session not resumed"))
            # Create a fresh session if nothing got resumed
            self.create_manager(None)
    return resumed
def _load_checkpoint_unix_py32(self):
    """Read and return the raw checkpoint data (UNIX, python 3.2 flavor).

    Improvement: a missing 'session' file is now treated as an empty
    checkpoint (returns ``b''``) instead of leaking ENOENT to the
    caller — matching the behaviour of the newer revision of this
    loader elsewhere in this file.
    """
    _session_pathname = os.path.join(self._location, self._SESSION_FILE)
    # Open the location directory
    location_fd = os.open(self._location, os.O_DIRECTORY)
    logger.debug(
        _("Opened session directory %r as descriptor %d"),
        self._location, location_fd)
    try:
        # Open the current session file in the location directory
        session_fd = os.open(_session_pathname, os.O_RDONLY)
        logger.debug(
            _("Opened session state file %r as descriptor %d"),
            _session_pathname, session_fd)
        # Stat the file to know how much to read
        session_stat = os.fstat(session_fd)
        logger.debug(
            # TRANSLATORS: stat is a system call name, don't translate it
            _("Stat'ed session state file: %s"), session_stat)
        try:
            # Read session data
            logger.debug(ngettext(
                "Reading %d byte of session state",
                "Reading %d bytes of session state",
                session_stat.st_size), session_stat.st_size)
            data = os.read(session_fd, session_stat.st_size)
            logger.debug(ngettext(
                "Read %d byte of session state",
                "Read %d bytes of session state",
                len(data)), len(data))
            if len(data) != session_stat.st_size:
                raise IOError(_("partial read?"))
        finally:
            # Close the session file
            logger.debug(_("Closed descriptor %d"), session_fd)
            os.close(session_fd)
    except IOError as exc:
        if exc.errno == errno.ENOENT:
            # Treat lack of 'session' file as an empty file
            return b''
        raise
    else:
        return data
    finally:
        # Close the location directory
        logger.debug(_("Closed descriptor %d"), location_fd)
        os.close(location_fd)
def checkpoint(self):
    """Persist the current session state to storage.

    The suspended state can later be restored with
    :meth:`SessionManager.load_session()`. A stale lock left by a
    previous writer is broken and the save is retried once.
    """
    logger.debug("SessionManager.checkpoint()")
    data = SessionSuspendHelper().suspend(self.state)
    size = len(data)
    logger.debug(
        ngettext(
            "Saving %d byte of checkpoint data to %r",
            "Saving %d bytes of checkpoint data to %r",
            size),
        size, self.storage.location)
    try:
        self.storage.save_checkpoint(data)
    except LockedStorageError:
        # Break the stale lock and retry exactly once.
        self.storage.break_lock()
        self.storage.save_checkpoint(data)
def checkpoint(self):
    """Create an on-disk checkpoint of the session.

    Call :meth:`SessionManager.load_session()` later to reopen the same
    session. If the storage is locked, the lock is broken and the write
    is attempted one more time.
    """
    logger.debug("SessionManager.checkpoint()")
    data = SessionSuspendHelper().suspend(self.state)
    message = ngettext(
        "Saving %d byte of checkpoint data to %r",
        "Saving %d bytes of checkpoint data to %r",
        len(data))
    logger.debug(message, len(data), self.storage.location)
    try:
        self.storage.save_checkpoint(data)
    except LockedStorageError:
        self.storage.break_lock()
        self.storage.save_checkpoint(data)
def _maybe_resume_session(self):
    """Resume a session: the one named on the command line, or one picked
    interactively.

    Returns True when a session was resumed, False otherwise. Raises
    RuntimeError when the explicitly requested session is not among the
    resumable candidates.
    """
    candidates = list(self.ctx.sa.get_resumable_sessions())
    wanted_id = self.ctx.args.session_id
    if wanted_id:
        # session_ids are unique, so there should be only 1
        matching = [
            candidate for candidate in candidates
            if candidate.id == wanted_id]
        if not matching:
            raise RuntimeError("Requested session is not resumable!")
        self._resume_session(matching[0])
        return True
    if not self.is_interactive:
        return False
    print(self.C.header(_("Resume Incomplete Session")))
    print(ngettext(
        "There is {0} incomplete session that might be resumed",
        "There are {0} incomplete sessions that might be resumed",
        len(candidates)
    ).format(len(candidates)))
    return self._run_resume_ui_loop(candidates)
def write_summary(self, data):
    """Render the summary sheet: outcome counts, rates and a pie chart."""
    sheet = self.worksheet2
    # Rates are undefined for an empty session; show N/A instead.
    if self.total != 0:
        pass_rate = "{:.2f}%".format(self.total_pass / self.total * 100)
        fail_rate = "{:.2f}%".format(self.total_fail / self.total * 100)
        skip_rate = "{:.2f}%".format(self.total_skip / self.total * 100)
    else:
        pass_rate = _("N/A")
        fail_rate = _("N/A")
        skip_rate = _("N/A")
    sheet.set_column(0, 0, 5)
    sheet.set_column(1, 1, 2)
    sheet.set_column(3, 3, 27)
    sheet.write(3, 1, _('Failures summary'), self.format03)
    sheet.write(
        4, 1, OMM['pass'].unicode_sigil, self.outcome_format_map['pass'])
    pass_line = ngettext(
        '{} Test passed', '{} Tests passed', self.total_pass
    ).format(self.total_pass) + " - " + _(
        'Success Rate: {} ({}/{})').format(
            pass_rate, self.total_pass, self.total)
    sheet.write(4, 2, pass_line, self.format02)
    sheet.write(
        5, 1, OMM['fail'].unicode_sigil, self.outcome_format_map['fail'])
    fail_line = ngettext(
        '{} Test failed', '{} Tests failed', self.total_fail
    ).format(self.total_fail) + ' - ' + _(
        'Failure Rate: {} ({}/{})').format(
            fail_rate, self.total_fail, self.total)
    sheet.write(5, 2, fail_line, self.format02)
    sheet.write(
        6, 1, OMM['skip'].unicode_sigil, self.outcome_format_map['skip'])
    skip_line = ngettext(
        '{} Test skipped', '{} Tests skipped', self.total_skip
    ).format(self.total_skip) + ' - ' + _(
        'Skip Rate: {} ({}/{})').format(
            skip_rate, self.total_skip, self.total)
    sheet.write(6, 2, skip_line, self.format02)
    # Chart source data: outcome labels in L3:L5, counts in M3:M5.
    sheet.write_column(
        'L3',
        [OMM['fail'].tr_label, OMM['skip'].tr_label, OMM['pass'].tr_label],
        self.format14)
    sheet.write_column(
        'M3', [self.total_fail, self.total_skip, self.total_pass],
        self.format14)
    # Configure the pie chart with one colored point per outcome.
    chart = self.workbook.add_chart({'type': 'pie'})
    chart.set_legend({'position': 'none'})
    chart.add_series({
        'points': [
            {'fill': {'color': OMM['fail'].color_hex}},
            {'fill': {'color': OMM['skip'].color_hex}},
            {'fill': {'color': OMM['pass'].color_hex}},
        ],
        'categories': '=' + _("Summary") + '!$L$3:$L$5',
        'values': '=' + _("Summary") + '!$M$3:$M$5',
    })
    # Insert the chart into the worksheet.
    sheet.insert_chart('F4', chart, {
        'x_offset': 0, 'y_offset': 10, 'x_scale': 0.50, 'y_scale': 0.50})
def _save_checkpoint_unix_py33(self, data):
    """Save checkpoint *data* atomically (UNIX, python 3.3 or newer).

    Writes *data* to a "next" file created with O_EXCL, fsyncs it,
    renames it over the current session file and then fsyncs the
    directory. The *at()-family calls (``dir_fd=``) keep every file
    operation relative to the already-opened directory descriptor.

    :param data: the bytes to store
    :raises TypeError: if *data* is not bytes
    :raises LockedStorageError: if the "next" file already exists
    """
    if not isinstance(data, bytes):
        raise TypeError("data must be bytes")
    logger.debug(ngettext(
        "Saving %d byte of data (%s)",
        "Saving %d bytes of data (%s)",
        len(data)), len(data), "UNIX, python 3.3 or newer")
    # Open the location directory, we need to fsync that later
    # XXX: this may fail, maybe we should keep the fd open all the time?
    location_fd = os.open(self._location, os.O_DIRECTORY)
    logger.debug(
        _("Opened %r as descriptor %d"), self._location, location_fd)
    try:
        # Open the "next" file in the location_directory
        #
        # Use openat(2) to ensure we always open a file relative to the
        # directory we already opened above. This is essential for fsync(2)
        # calls made below.
        #
        # Use "write" + "create" + "exclusive" flags so that no race
        # condition is possible.
        #
        # This will never return -1, it throws IOError when anything is
        # wrong. The caller has to catch this.
        #
        # As a special exception, this code handles EEXIST
        # (FileExistsError) and converts that to LockedStorageError
        # that can be especially handled by some layer above.
        try:
            next_session_fd = os.open(
                self._SESSION_FILE_NEXT,
                os.O_WRONLY | os.O_CREAT | os.O_EXCL, 0o644,
                dir_fd=location_fd)
        except FileExistsError:
            raise LockedStorageError()
        logger.debug(
            _("Opened next session file %s as descriptor %d"),
            self._SESSION_FILE_NEXT, next_session_fd)
        try:
            # Write session data to disk
            #
            # I cannot find conclusive evidence but it seems that
            # os.write() handles partial writes internally. In case we do
            # get a partial write _or_ we run out of disk space, raise an
            # explicit IOError.
            num_written = os.write(next_session_fd, data)
            logger.debug(ngettext(
                "Wrote %d byte of data to descriptor %d",
                "Wrote %d bytes of data to descriptor %d",
                num_written), num_written, next_session_fd)
            if num_written != len(data):
                raise IOError(_("partial write?"))
        except Exception as exc:
            logger.warning(_("Unable to complete write: %r"), exc)
            # If anything goes wrong we should unlink the next file. As
            # with the open() call above we use unlinkat to prevent race
            # conditions.
            # NOTE(review): the write error is logged and swallowed here
            # (not re-raised) — the caller never learns the save failed;
            # confirm this is intentional.
            # TRANSLATORS: unlinking as in deleting a file
            logger.warning(_("Unlinking %r"), self._SESSION_FILE_NEXT)
            os.unlink(self._SESSION_FILE_NEXT, dir_fd=location_fd)
        else:
            # If the write was successful we must flush kernel buffers.
            #
            # We want to be sure this data is really on disk by now as we
            # may crash the machine soon after this method exits.
            logger.debug(
                # TRANSLATORS: please don't translate fsync()
                _("Calling fsync() on descriptor %d"), next_session_fd)
            try:
                os.fsync(next_session_fd)
            except OSError as exc:
                logger.warning(_("Cannot synchronize file %r: %s"),
                               self._SESSION_FILE_NEXT, exc)
        finally:
            # Close the new session file
            logger.debug(_("Closing descriptor %d"), next_session_fd)
            os.close(next_session_fd)
        # Rename FILE_NEXT over FILE.
        #
        # Use renameat(2) to ensure that there is no race condition if the
        # location (directory) is being moved
        logger.debug(
            _("Renaming %r to %r"),
            self._SESSION_FILE_NEXT, self._SESSION_FILE)
        try:
            os.rename(self._SESSION_FILE_NEXT, self._SESSION_FILE,
                      src_dir_fd=location_fd, dst_dir_fd=location_fd)
        except Exception as exc:
            # Same as above, if we fail we need to unlink the next file
            # otherwise any other attempts will not be able to open() it
            # with O_EXCL flag.
            logger.warning(
                _("Unable to rename/overwrite %r to %r: %r"),
                self._SESSION_FILE_NEXT, self._SESSION_FILE, exc)
            # TRANSLATORS: unlinking as in deleting a file
            logger.warning(_("Unlinking %r"), self._SESSION_FILE_NEXT)
            os.unlink(self._SESSION_FILE_NEXT, dir_fd=location_fd)
        # Flush kernel buffers on the directory.
        #
        # This should ensure the rename operation is really on disk by now.
        # As noted above, this is essential for being able to survive
        # system crash immediately after exiting this method.
        # TRANSLATORS: please don't translate fsync()
        logger.debug(_("Calling fsync() on descriptor %d"), location_fd)
        try:
            os.fsync(location_fd)
        except OSError as exc:
            logger.warning(_("Cannot synchronize directory %r: %s"),
                           self._location, exc)
    finally:
        # Close the location directory
        logger.debug(_("Closing descriptor %d"), location_fd)
        os.close(location_fd)
def _save_checkpoint_unix_py32(self, data):
    """Save checkpoint *data* atomically (UNIX, python 3.2 flavor).

    Bug fix: the EEXIST errno constant was misspelled ``errno.EEXISTS``
    (no such attribute), so hitting an existing "next" file raised
    AttributeError instead of LockedStorageError.

    :param data: the bytes to store
    :raises TypeError: if *data* is not bytes
    :raises LockedStorageError: if the "next" file already exists
    """
    # NOTE: this is like _save_checkpoint_py33 but without all the
    # *at() functions (openat, renameat)
    #
    # Since we cannot use those functions there is an implicit race
    # condition on all open() calls with another process that renames
    # any of the directories that are part of the opened path.
    #
    # I don't think we can really do anything about this in userspace
    # so this, python 3.2 specific version, just does the best effort
    # implementation. Some of the comments were redacted but
    # keep in mind that the rename race is always there.
    if not isinstance(data, bytes):
        raise TypeError("data must be bytes")
    logger.debug(ngettext(
        "Saving %d byte of data (%s)",
        "Saving %d bytes of data (%s)",
        len(data)), len(data), "UNIX, python 3.2 or older")
    # Helper pathnames, needed because we don't have *at functions
    _next_session_pathname = os.path.join(
        self._location, self._SESSION_FILE_NEXT)
    _session_pathname = os.path.join(self._location, self._SESSION_FILE)
    # Open the location directory, we need to fsync that later
    # XXX: this may fail, maybe we should keep the fd open all the time?
    location_fd = os.open(self._location, os.O_DIRECTORY)
    logger.debug(
        _("Opened %r as descriptor %d"), self._location, location_fd)
    try:
        # Open the "next" file with "write" + "create" + "exclusive"
        # flags so that no race condition is possible. This throws
        # IOError when anything is wrong; as a special exception EEXIST
        # becomes LockedStorageError for the layer above.
        try:
            next_session_fd = os.open(
                _next_session_pathname,
                os.O_WRONLY | os.O_CREAT | os.O_EXCL, 0o644)
        except IOError as exc:
            # BUG FIX: this used to compare against errno.EEXISTS which
            # does not exist and raised AttributeError at runtime.
            if exc.errno == errno.EEXIST:
                raise LockedStorageError()
            else:
                raise
        logger.debug(
            _("Opened next session file %s as descriptor %d"),
            _next_session_pathname, next_session_fd)
        try:
            # Write session data to disk; raise an explicit IOError on a
            # partial write / full disk.
            num_written = os.write(next_session_fd, data)
            logger.debug(ngettext(
                "Wrote %d byte of data to descriptor %d",
                "Wrote %d bytes of data to descriptor %d",
                num_written), num_written, next_session_fd)
            if num_written != len(data):
                raise IOError(_("partial write?"))
        except Exception as exc:
            logger.warning(_("Unable to complete write: %r"), exc)
            # If anything goes wrong we should unlink the next file.
            # TRANSLATORS: unlinking as in deleting a file
            logger.warning(_("Unlinking %r"), _next_session_pathname)
            os.unlink(_next_session_pathname)
        else:
            # If the write was successful we must flush kernel buffers;
            # the machine may crash soon after this method exits.
            logger.debug(
                # TRANSLATORS: please don't translate fsync()
                _("Calling fsync() on descriptor %d"), next_session_fd)
            try:
                os.fsync(next_session_fd)
            except OSError as exc:
                logger.warning(_("Cannot synchronize file %r: %s"),
                               _next_session_pathname, exc)
        finally:
            # Close the new session file
            logger.debug(_("Closing descriptor %d"), next_session_fd)
            os.close(next_session_fd)
        # Rename FILE_NEXT over FILE.
        logger.debug(_("Renaming %r to %r"),
                     _next_session_pathname, _session_pathname)
        try:
            os.rename(_next_session_pathname, _session_pathname)
        except Exception as exc:
            # If we fail we need to unlink the next file, otherwise any
            # other attempts will not be able to open() it with O_EXCL.
            logger.warning(
                _("Unable to rename/overwrite %r to %r: %r"),
                _next_session_pathname, _session_pathname, exc)
            # TRANSLATORS: unlinking as in deleting a file
            logger.warning(_("Unlinking %r"), _next_session_pathname)
            os.unlink(_next_session_pathname)
        # Flush kernel buffers on the directory.
        #
        # This should ensure the rename operation is really on disk by
        # now — essential to survive a system crash right after exit.
        # TRANSLATORS: please don't translate fsync()
        logger.debug(_("Calling fsync() on descriptor %d"), location_fd)
        try:
            os.fsync(location_fd)
        except OSError as exc:
            logger.warning(_("Cannot synchronize directory %r: %s"),
                           self._location, exc)
    finally:
        # Close the location directory
        logger.debug(_("Closing descriptor %d"), location_fd)
        os.close(location_fd)
def _save_checkpoint_win32_py33(self, data):
    """Save checkpoint *data* (Windows flavor).

    Like the UNIX py32 variant but without a directory descriptor
    (there is no os.O_DIRECTORY on Windows) and using os.replace() for
    the atomic overwrite. Bug fix: ``errno.EEXISTS`` (a non-existent
    attribute) corrected to ``errno.EEXIST``.

    :param data: the bytes to store
    :raises TypeError: if *data* is not bytes
    :raises LockedStorageError: if the "next" file already exists
    """
    # NOTE: The windows version is relatively new and under-tested
    # but then again we don't expect to run tests *on* windows, only
    # *from* windows so hard data retention requirements are of lesser
    # importance.
    if not isinstance(data, bytes):
        raise TypeError("data must be bytes")
    logger.debug(ngettext(
        "Saving %d byte of data (%s)",
        "Saving %d bytes of data (%s)",
        len(data)), len(data), "Windows")
    # Helper pathnames, needed because we don't have *at functions
    _next_session_pathname = os.path.join(
        self._location, self._SESSION_FILE_NEXT)
    _session_pathname = os.path.join(self._location, self._SESSION_FILE)
    # Open the "next" file with "write" + "create" + "exclusive" flags
    # so that no race condition is possible; EEXIST becomes
    # LockedStorageError for the layer above.
    try:
        next_session_fd = os.open(
            _next_session_pathname,
            os.O_WRONLY | os.O_CREAT | os.O_EXCL | os.O_BINARY, 0o644)
    except IOError as exc:
        # BUG FIX: this used to compare against errno.EEXISTS which does
        # not exist and raised AttributeError at runtime.
        if exc.errno == errno.EEXIST:
            raise LockedStorageError()
        else:
            raise
    logger.debug(
        _("Opened next session file %s as descriptor %d"),
        _next_session_pathname, next_session_fd)
    try:
        # Write session data to disk; raise an explicit IOError on a
        # partial write / full disk.
        num_written = os.write(next_session_fd, data)
        logger.debug(ngettext(
            "Wrote %d byte of data to descriptor %d",
            "Wrote %d bytes of data to descriptor %d",
            num_written), num_written, next_session_fd)
        if num_written != len(data):
            raise IOError(_("partial write?"))
    except Exception as exc:
        logger.warning(_("Unable to complete write: %s"), exc)
        # If anything goes wrong we should unlink the next file.
        # TRANSLATORS: unlinking as in deleting a file
        logger.warning(_("Unlinking %r: %r"), _next_session_pathname, exc)
        os.unlink(_next_session_pathname)
    else:
        # If the write was successful we must flush kernel buffers; the
        # machine may crash soon after this method exits.
        logger.debug(
            # TRANSLATORS: please don't translate fsync()
            _("Calling fsync() on descriptor %d"), next_session_fd)
        try:
            os.fsync(next_session_fd)
        except OSError as exc:
            logger.warning(_("Cannot synchronize file %r: %s"),
                           _next_session_pathname, exc)
    finally:
        # Close the new session file
        logger.debug(_("Closing descriptor %d"), next_session_fd)
        os.close(next_session_fd)
    # Rename FILE_NEXT over FILE.
    logger.debug(_("Renaming %r to %r"),
                 _next_session_pathname, _session_pathname)
    try:
        os.replace(_next_session_pathname, _session_pathname)
    except Exception as exc:
        # If we fail we need to unlink the next file, otherwise any
        # other attempts will not be able to open() it with O_EXCL.
        logger.warning(
            _("Unable to rename/overwrite %r to %r: %r"),
            _next_session_pathname, _session_pathname, exc)
        # TRANSLATORS: unlinking as in deleting a file
        logger.warning(_("Unlinking %r"), _next_session_pathname)
        os.unlink(_next_session_pathname)
def _save_checkpoint_unix_py33(self, data):
    """Save checkpoint *data* atomically (UNIX, python 3.3 or newer).

    Fix: the two bare ``except:`` clauses are narrowed to
    ``except Exception`` so that KeyboardInterrupt/SystemExit are no
    longer caught during cleanup, and the caught error is logged —
    matching the newer revision of this method in this file.

    :param data: the bytes to store
    :raises TypeError: if *data* is not bytes
    :raises LockedStorageError: if the "next" file already exists
    """
    if not isinstance(data, bytes):
        raise TypeError("data must be bytes")
    logger.debug(ngettext(
        "Saving %d byte of data (UNIX, python 3.3 or newer)",
        "Saving %d bytes of data (UNIX, python 3.3 or newer)",
        len(data)), len(data))
    # Open the location directory, we need to fsync that later
    # XXX: this may fail, maybe we should keep the fd open all the time?
    location_fd = os.open(self._location, os.O_DIRECTORY)
    logger.debug(
        _("Opened %r as descriptor %d"), self._location, location_fd)
    try:
        # Open the "next" file relative to the directory descriptor
        # (openat(2)); "write" + "create" + "exclusive" flags prevent
        # races. FileExistsError becomes LockedStorageError for the
        # layer above.
        try:
            next_session_fd = os.open(
                self._SESSION_FILE_NEXT,
                os.O_WRONLY | os.O_CREAT | os.O_EXCL, 0o644,
                dir_fd=location_fd)
        except FileExistsError:
            raise LockedStorageError()
        logger.debug(
            _("Opened next session file %s as descriptor %d"),
            self._SESSION_FILE_NEXT, next_session_fd)
        try:
            # Write session data to disk; raise an explicit IOError on a
            # partial write / full disk.
            num_written = os.write(next_session_fd, data)
            logger.debug(ngettext(
                "Wrote %d byte of data to descriptor %d",
                "Wrote %d bytes of data to descriptor %d",
                num_written), num_written, next_session_fd)
            if num_written != len(data):
                raise IOError(_("partial write?"))
        except Exception as exc:
            # FIX: narrowed from a bare `except:`; log what went wrong.
            logger.warning(_("Unable to complete write: %r"), exc)
            # If anything goes wrong we should unlink the next file. As
            # with the open() call above we use unlinkat to prevent race
            # conditions.
            # TRANSLATORS: unlinking as in deleting a file
            logger.warning(_("Unlinking %r"), self._SESSION_FILE_NEXT)
            os.unlink(self._SESSION_FILE_NEXT, dir_fd=location_fd)
        else:
            # If the write was successful we must flush kernel buffers;
            # the machine may crash soon after this method exits.
            logger.debug(
                # TRANSLATORS: please don't translate fsync()
                _("Calling fsync() on descriptor %d"), next_session_fd)
            os.fsync(next_session_fd)
        finally:
            # Close the new session file
            logger.debug(_("Closing descriptor %d"), next_session_fd)
            os.close(next_session_fd)
        # Rename FILE_NEXT over FILE using renameat(2) so that there is
        # no race condition if the location (directory) is being moved.
        logger.debug(
            _("Renaming %r to %r"),
            self._SESSION_FILE_NEXT, self._SESSION_FILE)
        try:
            os.rename(self._SESSION_FILE_NEXT, self._SESSION_FILE,
                      src_dir_fd=location_fd, dst_dir_fd=location_fd)
        except Exception as exc:
            # FIX: narrowed from a bare `except:`. If the rename failed
            # we must unlink the next file, otherwise later attempts
            # cannot open() it with O_EXCL.
            logger.warning(
                _("Unable to rename/overwrite %r to %r: %r"),
                self._SESSION_FILE_NEXT, self._SESSION_FILE, exc)
            # TRANSLATORS: unlinking as in deleting a file
            logger.warning(_("Unlinking %r"), self._SESSION_FILE_NEXT)
            os.unlink(self._SESSION_FILE_NEXT, dir_fd=location_fd)
        # Flush kernel buffers on the directory so the rename itself
        # survives a system crash right after this method exits.
        # TRANSLATORS: please don't translate fsync()
        logger.debug(_("Calling fsync() on descriptor %d"), location_fd)
        os.fsync(location_fd)
    finally:
        # Close the location directory
        logger.debug(_("Closing descriptor %d"), location_fd)
        os.close(location_fd)
def _save_checkpoint_unix_py32(self, data):
    """Save checkpoint *data* atomically (UNIX, python 3.2 or older).

    Fixes: ``errno.EEXISTS`` (a non-existent attribute) corrected to
    ``errno.EEXIST``, and the two bare ``except:`` clauses narrowed to
    ``except Exception`` so that KeyboardInterrupt/SystemExit are not
    caught during cleanup.

    :param data: the bytes to store
    :raises TypeError: if *data* is not bytes
    :raises LockedStorageError: if the "next" file already exists
    """
    # NOTE: this is like _save_checkpoint_py33 but without all the
    # *at() functions (openat, renameat)
    #
    # Since we cannot use those functions there is an implicit race
    # condition on all open() calls with another process that renames
    # any of the directories that are part of the opened path.
    #
    # I don't think we can really do anything about this in userspace
    # so this, python 3.2 specific version, just does the best effort
    # implementation. Some of the comments were redacted but
    # keep in mind that the rename race is always there.
    if not isinstance(data, bytes):
        raise TypeError("data must be bytes")
    logger.debug(ngettext(
        "Saving %d byte of data (UNIX, python 3.2 or older)",
        "Saving %d bytes of data (UNIX, python 3.2 or older)",
        len(data)), len(data))
    # Helper pathnames, needed because we don't have *at functions
    _next_session_pathname = os.path.join(
        self._location, self._SESSION_FILE_NEXT)
    _session_pathname = os.path.join(self._location, self._SESSION_FILE)
    # Open the location directory, we need to fsync that later
    # XXX: this may fail, maybe we should keep the fd open all the time?
    location_fd = os.open(self._location, os.O_DIRECTORY)
    logger.debug(
        _("Opened %r as descriptor %d"), self._location, location_fd)
    try:
        # Open the "next" file with "write" + "create" + "exclusive"
        # flags so that no race condition is possible; EEXIST becomes
        # LockedStorageError for the layer above.
        try:
            next_session_fd = os.open(
                _next_session_pathname,
                os.O_WRONLY | os.O_CREAT | os.O_EXCL, 0o644)
        except IOError as exc:
            # BUG FIX: this used to compare against errno.EEXISTS which
            # does not exist and raised AttributeError at runtime.
            if exc.errno == errno.EEXIST:
                raise LockedStorageError()
            else:
                raise
        logger.debug(
            _("Opened next session file %s as descriptor %d"),
            _next_session_pathname, next_session_fd)
        try:
            # Write session data to disk; raise an explicit IOError on a
            # partial write / full disk.
            num_written = os.write(next_session_fd, data)
            logger.debug(ngettext(
                "Wrote %d byte of data to descriptor %d",
                "Wrote %d bytes of data to descriptor %d",
                num_written), num_written, next_session_fd)
            if num_written != len(data):
                raise IOError(_("partial write?"))
        except Exception:
            # FIX: narrowed from a bare `except:`. If anything goes
            # wrong we should unlink the next file.
            # TRANSLATORS: unlinking as in deleting a file
            logger.warning(_("Unlinking %r"), _next_session_pathname)
            os.unlink(_next_session_pathname)
        else:
            # If the write was successful we must flush kernel buffers;
            # the machine may crash soon after this method exits.
            logger.debug(
                # TRANSLATORS: please don't translate fsync()
                _("Calling fsync() on descriptor %d"), next_session_fd)
            os.fsync(next_session_fd)
        finally:
            # Close the new session file
            logger.debug(_("Closing descriptor %d"), next_session_fd)
            os.close(next_session_fd)
        # Rename FILE_NEXT over FILE.
        logger.debug(_("Renaming %r to %r"),
                     _next_session_pathname, _session_pathname)
        try:
            os.rename(_next_session_pathname, _session_pathname)
        except Exception:
            # FIX: narrowed from a bare `except:`. If the rename failed
            # we must unlink the next file, otherwise later attempts
            # cannot open() it with O_EXCL.
            # TRANSLATORS: unlinking as in deleting a file
            logger.warning(_("Unlinking %r"), _next_session_pathname)
            os.unlink(_next_session_pathname)
        # Flush kernel buffers on the directory.
        #
        # This should ensure the rename operation is really on disk by
        # now — essential to survive a system crash right after exit.
        # TRANSLATORS: please don't translate fsync()
        logger.debug(_("Calling fsync() on descriptor %d"), location_fd)
        os.fsync(location_fd)
    finally:
        # Close the location directory
        logger.debug(_("Closing descriptor %d"), location_fd)
        os.close(location_fd)
def write_summary(self, data):
    """Write the summary worksheet: outcome counts, rates and a pie chart.

    Fixes two defects of the previous revision:

    * the "failed"/"skipped" ``ngettext`` templates were never
      ``.format()``-ed, so literal ``{}`` placeholders leaked into the
      spreadsheet;
    * a session with zero results raised ``ZeroDivisionError`` while
      computing the rates.
    """
    self.worksheet2.set_column(0, 0, 5)
    self.worksheet2.set_column(1, 1, 2)
    self.worksheet2.set_column(3, 3, 27)
    self.worksheet2.write(3, 1, _('Failures summary'), self.format03)
    # Guard against a session with no results at all.
    divisor = self.total if self.total else 1
    self.worksheet2.write(4, 1, '✔', self.format10)
    self.worksheet2.write(
        4, 2,
        (ngettext('{} Test passed', '{} Tests passed',
                  self.total_pass).format(self.total_pass)
         + " - "
         + _('Success Rate: {:.2f}% ({}/{})').format(
             self.total_pass / divisor * 100,
             self.total_pass, self.total)),
        self.format02)
    self.worksheet2.write(5, 1, '✘', self.format11)
    self.worksheet2.write(
        5, 2,
        # BUG FIX: the template was concatenated without .format(),
        # printing a literal "{} Tests failed" in the sheet.
        (ngettext('{} Test failed', '{} Tests failed',
                  self.total_fail).format(self.total_fail)
         + ' - '
         + _('Failure Rate: {:.2f}% ({}/{})').format(
             self.total_fail / divisor * 100,
             self.total_fail, self.total)),
        self.format02)
    self.worksheet2.write(6, 1, '-', self.format12)
    self.worksheet2.write(
        6, 2,
        # BUG FIX: same missing .format() as the "failed" row.
        (ngettext('{} Test skipped', '{} Tests skipped',
                  self.total_skip).format(self.total_skip)
         + ' - '
         + _('Skip Rate: {:.2f}% ({}/{})').format(
             self.total_skip / divisor * 100,
             self.total_skip, self.total)),
        self.format02)
    # Chart source data: labels in L3:L5, counts in M3:M5.
    self.worksheet2.write_column(
        'L3', [_('Fail'), _('Skip'), _('Pass')], self.format14)
    self.worksheet2.write_column(
        'M3', [self.total_fail, self.total_skip, self.total_pass],
        self.format14)
    # Configure the series.
    chart = self.workbook.add_chart({'type': 'pie'})
    chart.set_legend({'position': 'none'})
    chart.add_series({
        'points': [
            {'fill': {'color': 'red'}},
            {'fill': {'color': 'gray'}},
            {'fill': {'color': 'lime'}},
        ],
        'categories': '=' + _("Summary") + '!$L$3:$L$5',
        'values': '=' + _("Summary") + '!$M$3:$M$5'
    })
    # Insert the chart into the worksheet.
    self.worksheet2.insert_chart('F4', chart, {
        'x_offset': 0, 'y_offset': 10, 'x_scale': 0.25, 'y_scale': 0.25
    })