Example #1
  def flush(self):
    """Flush pool contents."""
    # Write data to in-memory buffer first.
    buf = _StringWriter()
    with records.RecordsWriter(buf) as w:
      for record in self._buffer:
        w.write(record)

    str_buf = buf.to_string()
    if not self._exclusive and len(str_buf) > _FILES_API_MAX_SIZE:
      # Shouldn't really happen because of flush size.
      raise errors.Error(
          "Buffer too big. Can't write more than %s bytes in one request: "
          "risk of writes interleaving. Got: %s" %
          (_FILES_API_MAX_SIZE, len(str_buf)))

    # Write data to file.
    start_time = time.time()
    with files.open(self._filename, "a", exclusive_lock=self._exclusive) as f:
      f.write(str_buf)
      if self._ctx:
        operation.counters.Increment(
            COUNTER_IO_WRITE_BYTES, len(str_buf))(self._ctx)
    if self._ctx:
      operation.counters.Increment(
          COUNTER_IO_WRITE_MSEC,
          int((time.time() - start_time) * 1000))(self._ctx)

    # Reset the buffer.
    self._buffer = []
    self._size = 0
    gc.collect()
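
The pool above buffers through a _StringWriter (the same helper appears in Examples #2 and #6), whose interface is visible from usage: a no-argument constructor, a write(data) method called by records.RecordsWriter, and to_string(). A minimal sketch consistent with that interface; the chunk-list implementation is an assumption, not the library's confirmed code:

class _StringWriter(object):
  """In-memory file-like target for records.RecordsWriter (sketch)."""

  def __init__(self):
    # Accumulate chunks and join once; repeated string += would be O(n^2).
    self._chunks = []

  def write(self, data):
    self._chunks.append(data)

  def to_string(self):
    return "".join(self._chunks)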
Example #2
    def flush(self):
        """Flush pool contents."""

        # Write data to in-memory buffer first.
        buf = _StringWriter()
        with records.RecordsWriter(buf) as w:
            for record in self._buffer:
                w.write(record)

        str_buf = buf.to_string()
        if len(str_buf) > _FILES_API_MAX_SIZE:
            # Shouldn't really happen because of flush size.
            raise errors.Error(
                "Buffer too big. Can't write more than %s bytes in one request: "
                "risk of writes interleaving. Got: %s" %
                (_FILES_API_MAX_SIZE, len(str_buf)))

        # Write data to file.
        start_time = time.time()
        with files.open(self._filename, "a") as f:
            f.write(str_buf)
            if self._ctx:
                operation.counters.Increment(COUNTER_IO_WRITE_BYTES,
                                             len(str_buf))(self._ctx)
        if self._ctx:
            operation.counters.Increment(
                COUNTER_IO_WRITE_MSEC, int(
                    (time.time() - start_time) * 1000))(self._ctx)

        # Reset the buffer.
        self._buffer = []
        self._size = 0
Example #3
  def _flush(self, ctx):
    record_writer = records.RecordsWriter(
        _PassthroughWriter(super(_GoogleCloudStorageRecordOutputWriter, self),
                           ctx))
    # RecordsWriter will pad the last LevelDB block at the end.
    with record_writer as w:
      for record in self._buffer:
        w.write(record)
    self._reset()
Example #4
    def _flush(self, ctx):
        record_writer = records.RecordsWriter(
            _PassthroughWriter(
                super(_GoogleCloudStorageRecordOutputWriter, self), ctx))

        # RecordsWriter will pad the last LevelDB block at the end.
        with record_writer as w:
            for record in self._buffer:
                w.write(record)
        self._reset()
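
Both GCS variants (Examples #3 and #4) hand records.RecordsWriter a _PassthroughWriter so the output writer looks like a plain file object. A sketch of that adapter, assuming the wrapped writer's write takes the data plus the context; the exact call signature is an assumption inferred from how ctx is passed in:

class _PassthroughWriter(object):
  """Adapts a context-taking output writer to the file-like interface
  records.RecordsWriter expects (sketch)."""

  def __init__(self, writer, ctx):
    self._writer = writer
    self._ctx = ctx

  def write(self, data):
    # Forward each chunk to the wrapped output writer with the context.
    self._writer.write(data, self._ctx)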
Example #5
def create_backup_info_file(filename, backup_info):
  """Creates a backup_info_file for the given BackupInformation model."""
  info_file = files.open(files.gs.create(filename), 'a', exclusive_lock=True)
  try:
    with records.RecordsWriter(info_file) as writer:
      # File format version marker.
      writer.write('1')
      # The serialized BackupInformation entity itself.
      writer.write(db.model_to_protobuf(backup_info).SerializeToString())

      # One record per kind's backup-files entity.
      for kind_files in backup_info.get_kind_backup_files():
        writer.write(db.model_to_protobuf(kind_files).SerializeToString())
  finally:
    info_file.close(finalize=True)
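
A file produced by create_backup_info_file can be read back with the matching records.RecordsReader, whose read() returns one record and raises EOFError at end of file. A minimal sketch of the reverse operation; the helper name read_backup_info_records is hypothetical:

def read_backup_info_records(filename):
  """Yields the raw records from a backup_info file (sketch)."""
  with files.open(filename, 'r') as info_file:
    reader = records.RecordsReader(info_file)
    try:
      while True:
        yield reader.read()
    except EOFError:
      pass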
Example #6
    def flush(self):
        """Flush pool contents."""
        try:
            # Write data to in-memory buffer first.
            buf = _StringWriter()
            with records.RecordsWriter(buf) as w:
                for record in self._buffer:
                    w.write(record)

            str_buf = buf.to_string()
            if not self._exclusive and len(str_buf) > _FILES_API_MAX_SIZE:
                # Shouldn't really happen because of flush size.
                raise errors.Error(
                    "Buffer too big. Can't write more than %s bytes in one request: "
                    "risk of writes interleaving. Got: %s" %
                    (_FILES_API_MAX_SIZE, len(str_buf)))

            # Write data to file.
            start_time = time.time()
            with files.open(self._filename,
                            "a",
                            exclusive_lock=self._exclusive) as f:
                f.write(str_buf)
                if self._ctx:
                    operation.counters.Increment(COUNTER_IO_WRITE_BYTES,
                                                 len(str_buf))(self._ctx)
            if self._ctx:
                operation.counters.Increment(
                    COUNTER_IO_WRITE_MSEC,
                    int((time.time() - start_time) * 1000))(self._ctx)

            self._buffer = []
            self._size = 0
            gc.collect()
        except files.UnknownError as e:
            # Treat unknown Files API errors as transient and retry the slice.
            logging.warning("UnknownError: %s", e)
            raise errors.RetrySliceError()
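
For completeness, each flush above is normally driven by an append that fills self._buffer and self._size. A sketch of that driver, assuming a _flush_size threshold attribute set at construction time; the attribute name is an assumption, since only _buffer and _size appear in the snippets:

    def append(self, data):
        """Buffer one record, flushing first if it would overflow (sketch)."""
        # Flush before the pending Files API payload could grow past the
        # assumed _flush_size threshold.
        if self._size + len(data) > self._flush_size:
            self.flush()
        self._buffer.append(data)
        self._size += len(data)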