Example #1
  def _TraceBlock(block, skip, trace_out_file, operations, base_name):
    """Trace the origin of a given block through a sequence of operations.

    Maps the given dest block back to the source block its content came from
    during an update, following MOVE operations transitively. A single
    reverse sweep over the operation sequence suffices. A log of the
    operations and source blocks responsible for the dest block's data is
    written to the provided output file.

    Args:
      block: the block number to trace
      skip: number of initial transitive origins to ignore
      trace_out_file: a file object to dump the trace to
      operations: the sequence of operations
      base_name: name of the operation sequence
    """
    # Sweep the operations from last to first.
    for op, op_name in common.OperationIter(operations, base_name,
                                            reverse=True):
      offset = 0
      matched = False

      # Look for the traced block among this operation's dest extents.
      for dst_ex, dst_ex_name in common.ExtentIter(op.dst_extents,
                                                   op_name + '.dst_extents'):
        ex_end = dst_ex.start_block + dst_ex.num_blocks
        if dst_ex.start_block <= block < ex_end:
          if skip:
            # Caller asked to ignore this many initial origins.
            skip -= 1
          else:
            offset += block - dst_ex.start_block
            trace_out_file.write(
                '%d: %s: found %s (total block offset: %d)\n' %
                (block, dst_ex_name, common.FormatExtent(dst_ex), offset))
            matched = True
            break

        offset += dst_ex.num_blocks

      if matched:
        # Only MOVE operations have a traceable source; stop otherwise.
        if op.type != common.OpType.MOVE:
          break

        # Translate the accumulated offset into a source block and keep
        # tracing that block through earlier operations.
        for src_ex, src_ex_name in common.ExtentIter(op.src_extents,
                                                     op_name + '.src_extents'):
          if offset < src_ex.num_blocks:
            block = src_ex.start_block + offset
            trace_out_file.write(
                '%s:  mapped to %s (%d)\n' %
                (src_ex_name, common.FormatExtent(src_ex), block))
            break

          offset -= src_ex.num_blocks
Example #2
  def _ApplyReplaceOperation(self, op, op_name, out_data, part_file, part_size):
    """Applies a REPLACE{,_BZ} operation.

    Writes the operation's data (bzip2-decompressed first for REPLACE_BZ) to
    the destination extents of the partition file, zero-padding the final
    extent if the data does not fill it completely.

    Args:
      op: the operation object
      op_name: name string for error reporting
      out_data: the data to be written
      part_file: the partition file object
      part_size: the size of the partition

    Raises:
      PayloadError if something goes wrong.
    """
    block_size = self.block_size
    data_length = len(out_data)

    # Decompress data if needed.
    if op.type == common.OpType.REPLACE_BZ:
      out_data = bz2.decompress(out_data)
      data_length = len(out_data)

    # Write data to blocks specified in dst extents.
    data_start = 0
    for ex, ex_name in common.ExtentIter(op.dst_extents,
                                         '%s.dst_extents' % op_name):
      start_block = ex.start_block
      num_blocks = ex.num_blocks
      count = num_blocks * block_size

      # Make sure it's not a fake (signature) operation.
      if start_block != common.PSEUDO_EXTENT_MARKER:
        data_end = data_start + count

        # Make sure we're not running past partition boundary.
        if (start_block + num_blocks) * block_size > part_size:
          raise PayloadError(
              '%s: extent (%s) exceeds partition size (%d)' %
              (ex_name, common.FormatExtent(ex, block_size),
               part_size))

        # Make sure that we have enough data to write; up to one block of
        # zero padding is tolerated (see below).
        # BUG FIX: the original message had a '%s' placeholder but no format
        # argument, so the raised error contained a literal '%s'.
        if data_end >= data_length + block_size:
          raise PayloadError(
              '%s: more dst blocks than data (even with padding)' % ex_name)

        # Pad with zeros if necessary.
        if data_end > data_length:
          padding = data_end - data_length
          out_data += '\0' * padding

        # NOTE(review): seeking the payload file here looks spurious -- the
        # write below only touches part_file and nothing here reads from the
        # payload file; confirm this seek is actually needed.
        self.payload.payload_file.seek(start_block * block_size)
        part_file.seek(start_block * block_size)
        part_file.write(out_data[data_start:data_end])

      # Advance the data pointer even over pseudo (signature) extents so
      # offsets stay aligned with the extent list.
      data_start += count

    # Make sure we wrote all data.
    if data_start < data_length:
      raise PayloadError('%s: wrote fewer bytes (%d) than expected (%d)' %
                         (op_name, data_start, data_length))