Example #1
  def _TraceBlock(block, skip, trace_out_file, operations, base_name):
    """Trace the origin of a given block through a sequence of operations.

    This method tries to map the given dest block to the corresponding source
    block from which its content originates in the course of an update. It
    further tries to trace transitive origins through MOVE operations. It is
    rather efficient, doing the actual tracing by means of a single reverse
    sweep through the operation sequence. It dumps a log of operations and
    source blocks responsible for the data in the given dest block to the
    provided output file.

    Args:
      block: the block number to trace
      skip: number of initial transitive origins to ignore
      trace_out_file: a file object to dump the trace to
      operations: the sequence of operations
      base_name: name of the operation sequence
    """
    # Traverse operations backwards.
    for op, op_name in common.OperationIter(operations, base_name,
                                            reverse=True):
      total_block_offset = 0
      found = False

      # Is the traced block mentioned in the dest extents?
      for dst_ex, dst_ex_name in common.ExtentIter(op.dst_extents,
                                                   op_name + '.dst_extents'):
        if (block >= dst_ex.start_block
            and block < dst_ex.start_block + dst_ex.num_blocks):
          if skip:
            skip -= 1
          else:
            total_block_offset += block - dst_ex.start_block
            trace_out_file.write(
                '%d: %s: found %s (total block offset: %d)\n' %
                (block, dst_ex_name, common.FormatExtent(dst_ex),
                 total_block_offset))
            found = True
            break

        total_block_offset += dst_ex.num_blocks

      if found:
        # Don't trace further, unless it's a MOVE.
        if op.type != common.OpType.MOVE:
          break

        # For MOVE, find corresponding source block and keep tracing.
        for src_ex, src_ex_name in common.ExtentIter(op.src_extents,
                                                     op_name + '.src_extents'):
          if total_block_offset < src_ex.num_blocks:
            block = src_ex.start_block + total_block_offset
            trace_out_file.write(
                '%s:  mapped to %s (%d)\n' %
                (src_ex_name, common.FormatExtent(src_ex), block))
            break

          total_block_offset -= src_ex.num_blocks
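
A minimal standalone sketch of the same reverse-tracing idea, using plain
namedtuples in place of the real update_payload protobuf messages and the
common.OperationIter/ExtentIter helpers (the Extent/Op types, the MOVE
constant, and trace_block itself are simplified stand-ins, and the skip
logic is omitted):

from collections import namedtuple

Extent = namedtuple('Extent', ['start_block', 'num_blocks'])
Op = namedtuple('Op', ['type', 'src_extents', 'dst_extents'])
MOVE = 'MOVE'

def trace_block(block, operations):
  """Walks operations in reverse, following MOVE sources like _TraceBlock."""
  trace = []
  for op in reversed(operations):
    offset = 0
    found = False
    for ex in op.dst_extents:
      if ex.start_block <= block < ex.start_block + ex.num_blocks:
        offset += block - ex.start_block
        trace.append((op.type, block))
        found = True
        break
      offset += ex.num_blocks
    if found:
      if op.type != MOVE:
        break  # A full write (e.g. REPLACE) is the origin; stop here.
      # For MOVE, map the dest-relative offset back into the source extents.
      for ex in op.src_extents:
        if offset < ex.num_blocks:
          block = ex.start_block + offset
          break
        offset -= ex.num_blocks
  return trace

# Block 12 was MOVEd from block 5, which in turn was written by a REPLACE.
ops = [Op('REPLACE', [], [Extent(0, 8)]),
       Op(MOVE, [Extent(5, 1)], [Extent(12, 1)])]
print(trace_block(12, ops))  # [('MOVE', 12), ('REPLACE', 5)]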

def _WriteExtents(file_obj, data, extents, block_size, base_name):
    """Writes data to file as defined by extent sequence.

    This tries to be efficient by not copying data as it is written in chunks.

    Args:
      file_obj: file object
      data: data to write
      extents: sequence of block extents (offset and length)
      block_size: size of each block
      base_name: name string of extent sequence for error reporting

    Raises:
      PayloadError when things don't add up.
    """
    data_offset = 0
    data_length = len(data)
    for ex, ex_name in common.ExtentIter(extents, base_name):
        if not data_length:
            raise PayloadError('%s: more write extents than data' % ex_name)
        write_length = min(data_length, ex.num_blocks * block_size)
        file_obj.seek(ex.start_block * block_size)
        file_obj.write(data[data_offset:(data_offset + write_length)])

        data_offset += write_length
        data_length -= write_length

    if data_length:
        raise PayloadError('%s: more data than write extents' % base_name)
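
A rough usage sketch of the extent-write pattern, run against an in-memory
"partition" (io.BytesIO); the block size, data, and (start_block, num_blocks)
tuples are made up and stand in for the real Extent objects:

import io

BLOCK_SIZE = 4
part = io.BytesIO(b'.' * 8 * BLOCK_SIZE)   # an 8-block partition full of '.'
data = b'AAAABBBB'                         # exactly two blocks worth of data
extents = [(6, 1), (2, 1)]                 # fill block 6 first, then block 2

offset = 0
for start_block, num_blocks in extents:
  length = min(len(data) - offset, num_blocks * BLOCK_SIZE)
  part.seek(start_block * BLOCK_SIZE)
  part.write(data[offset:offset + length])
  offset += length

print(part.getvalue())
# b'........BBBB............AAAA....'
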
Example #3
  def _ApplyReplaceOperation(self, op, op_name, out_data, part_file, part_size):
    """Applies a REPLACE{,_BZ} operation.

    Args:
      op: the operation object
      op_name: name string for error reporting
      out_data: the data to be written
      part_file: the partition file object
      part_size: the size of the partition

    Raises:
      PayloadError if something goes wrong.
    """
    block_size = self.block_size
    data_length = len(out_data)

    # Decompress data if needed.
    if op.type == common.OpType.REPLACE_BZ:
      out_data = bz2.decompress(out_data)
      data_length = len(out_data)

    # Write data to blocks specified in dst extents.
    data_start = 0
    for ex, ex_name in common.ExtentIter(op.dst_extents,
                                         '%s.dst_extents' % op_name):
      start_block = ex.start_block
      num_blocks = ex.num_blocks
      count = num_blocks * block_size

      # Make sure it's not a fake (signature) operation.
      if start_block != common.PSEUDO_EXTENT_MARKER:
        data_end = data_start + count

        # Make sure we're not running past partition boundary.
        if (start_block + num_blocks) * block_size > part_size:
          raise PayloadError(
              '%s: extent (%s) exceeds partition size (%d)' %
              (ex_name, common.FormatExtent(ex, block_size),
               part_size))

        # Make sure that we have enough data to write.
        if data_end >= data_length + block_size:
          raise PayloadError(
              '%s: more dst blocks than data (even with padding)' % ex_name)

        # Pad with zeros if necessary.
        if data_end > data_length:
          padding = data_end - data_length
          out_data += '\0' * padding

        self.payload.payload_file.seek(start_block * block_size)
        part_file.seek(start_block * block_size)
        part_file.write(out_data[data_start:data_end])

      data_start += count

    # Make sure we wrote all data.
    if data_start < data_length:
      raise PayloadError('%s: wrote fewer bytes (%d) than expected (%d)' %
                         (op_name, data_start, data_length))

  def _BytesInExtents(self, extents, base_name):
    """Counts the length of extents in bytes.

    Args:
      extents: The list of Extents.
      base_name: For error reporting.

    Returns:
      The number of bytes in extents.
    """
    length = 0
    # pylint: disable=unused-variable
    for ex, ex_name in common.ExtentIter(extents, base_name):
      length += ex.num_blocks * self.block_size
    return length
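
To make the bounds and padding checks in _ApplyReplaceOperation concrete, here
is a small worked example of the arithmetic with made-up numbers (block size,
extents, and data length are hypothetical); the covered value is what
_BytesInExtents would report for the same extents:

BLOCK_SIZE = 4096
dst_extents = [(10, 2), (40, 1)]   # hypothetical (start_block, num_blocks) pairs
data_length = 10000                # length of the (decompressed) REPLACE data

covered = sum(n for _, n in dst_extents) * BLOCK_SIZE   # 3 blocks = 12288 bytes
padding = covered - data_length                         # 2288 zero bytes appended
# The "even with padding" check: the shortfall must be less than one block,
# otherwise there are more dst blocks than data.
assert 0 <= padding < BLOCK_SIZE
print(covered, padding)            # 12288 2288
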
Example #5
def _ExtentsToBspatchArg(extents, block_size, base_name, data_length=-1):
    """Translates an extent sequence into a bspatch-compatible string argument.

  Args:
    extents: sequence of block extents (offset and length)
    block_size: size of each block
    base_name: name string of extent sequence for error reporting
    data_length: the actual total length of the data in bytes (optional)

  Returns:
    A tuple consisting of (i) a string of the form
    "off_1:len_1,...,off_n:len_n", (ii) an offset where zero padding is needed
    for filling the last extent, (iii) the length of the padding (zero means no
    padding is needed and the extents cover the full length of data).

  Raises:
    PayloadError if data_length is too short or too long.
  """
    arg = ''
    pad_off = pad_len = 0
    if data_length < 0:
        data_length = sys.maxint
    for ex, ex_name in common.ExtentIter(extents, base_name):
        if not data_length:
            raise PayloadError('%s: more extents than total data length' %
                               ex_name)

        is_pseudo = ex.start_block == common.PSEUDO_EXTENT_MARKER
        start_byte = -1 if is_pseudo else ex.start_block * block_size
        num_bytes = ex.num_blocks * block_size
        if data_length < num_bytes:
            # We're only padding a real extent.
            if not is_pseudo:
                pad_off = start_byte + data_length
                pad_len = num_bytes - data_length

            num_bytes = data_length

        arg += '%s%d:%d' % (arg and ',', start_byte, num_bytes)
        data_length -= num_bytes

    if data_length:
        raise PayloadError('%s: extents not covering full data length' %
                           base_name)

    return arg, pad_off, pad_len
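
A rough standalone sketch of the "off:len,off:len,..." string this function
builds, for made-up extents (pseudo extents are ignored here); the padding
values mirror the pad_off/pad_len computation above:

BLOCK_SIZE = 4096
extents = [(100, 2), (300, 1)]     # hypothetical (start_block, num_blocks) pairs
data_length = 11000                # the last extent is only partially filled

arg = ''
for start_block, num_blocks in extents:
  num_bytes = min(data_length, num_blocks * BLOCK_SIZE)
  arg += '%s%d:%d' % (arg and ',', start_block * BLOCK_SIZE, num_bytes)
  data_length -= num_bytes

print(arg)                         # 409600:8192,1228800:2808
pad_off = 300 * BLOCK_SIZE + 2808  # zero padding starts right after the data...
pad_len = 1 * BLOCK_SIZE - 2808    # ...and runs for 1288 bytes to fill the block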

  def _ApplyZeroOperation(self, op, op_name, part_file):
    """Applies a ZERO operation.

    Args:
      op: the operation object
      op_name: name string for error reporting
      part_file: the partition file object

    Raises:
      PayloadError if something goes wrong.
    """
    block_size = self.block_size
    base_name = '%s.dst_extents' % op_name

    # Iterate over the extents and write zero.
    # pylint: disable=unused-variable
    for ex, ex_name in common.ExtentIter(op.dst_extents, base_name):
      part_file.seek(ex.start_block * block_size)
      part_file.write(b'\0' * (ex.num_blocks * block_size))
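
A rough standalone equivalent of the ZERO loop above, again using an in-memory
partition and hypothetical (start_block, num_blocks) tuples in place of
op.dst_extents:

import io

BLOCK_SIZE = 4
part = io.BytesIO(b'x' * 6 * BLOCK_SIZE)   # a 6-block partition full of 'x'

for start_block, num_blocks in [(1, 2), (5, 1)]:
  part.seek(start_block * BLOCK_SIZE)
  part.write(b'\0' * (num_blocks * BLOCK_SIZE))

print(part.getvalue())
# b'xxxx' + b'\x00' * 8 + b'xxxxxxxx' + b'\x00' * 4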