def _delete(self, table_name, keys):
    """Delete every row named in *keys* from the namespaced *table_name*.

    A delete mutation is queued for each key through a BatchWriter,
    which is closed unconditionally, even if an error interrupts the
    loop.
    """
    writer = BatchWriter(
        conn=self.conn,
        table=self._ns(table_name),
        max_memory=self._max_memory,
        latency_ms=self._latency_ms,
        timeout_ms=self._timeout_ms,
        threads=self._threads,
    )
    try:
        for row_key in keys:
            tombstone = Mutation(row_key)
            tombstone.put(cf='', cq='', is_delete=True)
            writer.add_mutation(tombstone)
    finally:
        writer.close()
def _put(self, table_name, keys_and_values, counter_deletes=True):
    """Write ``(key, blob)`` pairs to the namespaced *table_name*.

    Mutations are batched, and the batch is flushed pre-emptively
    whenever the accumulated payload would reach half of the Thrift
    framed-transport size, so no single RPC exceeds the frame limit.

    :param table_name: logical table name, namespaced via ``self._ns``
    :param keys_and_values: iterable of ``(key, blob)`` pairs
    :param counter_deletes: when the table's value type is COUNTER,
        first delete the existing cells so the summing accumulator
        restarts from zero
    """
    cur_bytes = 0
    # Half the framed-transport size in bytes (MB * 2**20 / 2), leaving
    # headroom for per-mutation overhead in the same frame.
    max_bytes = self.thrift_framed_transport_size_in_mb * 2 ** 19
    batch_writer = BatchWriter(conn=self.conn,
                               table=self._ns(table_name),
                               max_memory=self._max_memory,
                               latency_ms=self._latency_ms,
                               timeout_ms=self._timeout_ms,
                               threads=self._threads)
    try:
        # Because COUNTER is implemented via a summing accumulator,
        # to do a put we need to delete all of the old values before
        # restarting the sum.
        if (self._value_types.get(table_name, str) is COUNTER
                and counter_deletes):
            # This branch iterates keys_and_values twice (delete pass,
            # then put pass); materialize it so a one-shot generator is
            # not exhausted by the delete pass, which would silently
            # skip all of the puts.
            keys_and_values = list(keys_and_values)
            for key, blob in keys_and_values:
                mut = Mutation(key)
                mut.put(cf='', cq='', is_delete=True)
                batch_writer.add_mutation(mut)
            batch_writer.flush()
        for key, blob in keys_and_values:
            if len(key) + len(blob) + cur_bytes >= max_bytes:
                logger.debug(
                    'len(key)=%d + len(blob)=%d + cur_bytes=%d >= '
                    'thrift_framed_transport_size_in_mb/2 = %d',
                    len(key), len(blob), cur_bytes, max_bytes)
                logger.debug(
                    'pre-emptively sending only what has been '
                    'batched, and will send this item in next '
                    'batch.')
                batch_writer.flush()
                cur_bytes = 0
            cur_bytes += len(key) + len(blob)
            mut = Mutation(key)
            mut.put(cf='', cq='', val=blob)
            batch_writer.add_mutation(mut)
    finally:
        batch_writer.close()