Example no. 1
0
    def large_batch(self, app, mutations, entity_changes, txn):
        """ Insert or delete multiple rows across tables in an atomic statement.

        The entity changes are first written to a batch log table so that a
        failure partway through can be detected and resolved later. The batch
        is then marked as applied, the mutations are performed, and finally
        the batch status and log entries are cleaned up (best effort).

        Args:
          app: A string containing the application ID.
          mutations: A list of dictionaries representing mutations.
          entity_changes: A list of changes at the entity level.
          txn: A transaction ID handler.
        Raises:
          BatchNotApplied if the batch could not be started or its log could
            not be written (nothing has been applied; callers may retry).
          AppScaleDBConnectionError if a database error was encountered after
            the point where the batch must be applied.
        """
        self.logger.debug('Large batch: transaction {}, {} mutations'.format(
            txn, len(mutations)))
        large_batch = LargeBatch(self.session, app, txn)
        try:
            yield large_batch.start()
        except FailedBatch as batch_error:
            # Nothing has been written yet, so surface this as "not applied".
            raise BatchNotApplied(str(batch_error))

        insert_item = ('INSERT INTO batches (app, transaction, namespace, '
                       '                     path, old_value, new_value) '
                       'VALUES (?, ?, ?, ?, ?, ?)')
        insert_statement = self.session.prepare(insert_item)

        statements_and_params = []
        for entity_change in entity_changes:
            # Encode old/new entity states; None marks an absent side
            # (e.g. an insert has no old value, a delete has no new value).
            old_value = None
            if entity_change['old'] is not None:
                old_value = bytearray(entity_change['old'].Encode())
            new_value = None
            if entity_change['new'] is not None:
                new_value = bytearray(entity_change['new'].Encode())

            parameters = (app, txn, entity_change['key'].name_space(),
                          bytearray(entity_change['key'].path().Encode()),
                          old_value, new_value)
            statements_and_params.append((insert_statement, parameters))

        try:
            # Issue all log inserts concurrently and wait for every one.
            yield [
                self.tornado_cassandra.execute(statement, parameters=params)
                for statement, params in statements_and_params
            ]
        except dbconstants.TRANSIENT_CASSANDRA_ERRORS:
            message = 'Unable to write large batch log'
            # Use self.logger consistently; a bare module-level "logger" is
            # not shown to be in scope for this method.
            self.logger.exception(message)
            raise BatchNotApplied(message)

        # Since failing after this point is expensive and time consuming, retry
        # operations to make a failure less likely.
        custom_retry_coroutine = retry_raw_coroutine(
            backoff_threshold=5,
            retrying_timeout=10,
            retry_on_exception=dbconstants.TRANSIENT_CASSANDRA_ERRORS)

        persistent_apply_batch = custom_retry_coroutine(
            large_batch.set_applied)
        try:
            yield persistent_apply_batch()
        except FailedBatch as batch_error:
            raise AppScaleDBConnectionError(str(batch_error))

        persistent_apply_mutations = custom_retry_coroutine(
            self.apply_mutations)
        try:
            yield persistent_apply_mutations(mutations, txn)
        except dbconstants.TRANSIENT_CASSANDRA_ERRORS:
            message = 'Exception during large batch'
            self.logger.exception(message)
            raise AppScaleDBConnectionError(message)

        try:
            yield large_batch.cleanup()
        except FailedBatch:
            # This should not raise an exception since the batch is already applied.
            self.logger.exception('Unable to clear batch status')

        clear_batch = ('DELETE FROM batches '
                       'WHERE app = %(app)s AND transaction = %(transaction)s')
        parameters = {'app': app, 'transaction': txn}
        try:
            yield self.tornado_cassandra.execute(clear_batch, parameters)
        except dbconstants.TRANSIENT_CASSANDRA_ERRORS:
            # Best effort: the batch is applied; leftover log rows are
            # harmless and can be removed later.
            self.logger.exception('Unable to clear batch log')
Example no. 2
0
  def large_batch(self, app, mutations, entity_changes, txn):
    """ Insert or delete multiple rows across tables in an atomic statement.

    The entity changes are first written to a batch log table so that a
    failure partway through can be detected and resolved later. The batch is
    then marked as applied, the mutations are performed, and finally the
    batch status and log entries are cleaned up (best effort).

    Args:
      app: A string containing the application ID.
      mutations: A list of dictionaries representing mutations.
      entity_changes: A list of changes at the entity level.
      txn: A transaction ID handler.
    Raises:
      BatchNotApplied if the batch could not be started or its log could not
        be written (nothing has been applied; callers may retry).
      AppScaleDBConnectionError if a database error was encountered after the
        point where the batch must be applied.
    """
    self.logger.debug('Large batch: transaction {}, {} mutations'.
                      format(txn, len(mutations)))
    large_batch = LargeBatch(self.session, app, txn)
    try:
      yield large_batch.start()
    except FailedBatch as batch_error:
      # Nothing has been written yet, so surface this as "not applied".
      raise BatchNotApplied(str(batch_error))

    insert_item = (
      'INSERT INTO batches (app, transaction, namespace, '
      '                     path, old_value, new_value) '
      'VALUES (?, ?, ?, ?, ?, ?)'
    )
    insert_statement = self.session.prepare(insert_item)

    statements_and_params = []
    for entity_change in entity_changes:
      # Encode old/new entity states; None marks an absent side
      # (e.g. an insert has no old value, a delete has no new value).
      old_value = None
      if entity_change['old'] is not None:
        old_value = bytearray(entity_change['old'].Encode())
      new_value = None
      if entity_change['new'] is not None:
        new_value = bytearray(entity_change['new'].Encode())

      parameters = (app, txn, entity_change['key'].name_space(),
                    bytearray(entity_change['key'].path().Encode()), old_value,
                    new_value)
      statements_and_params.append((insert_statement, parameters))

    try:
      # Issue all log inserts concurrently and wait for every one.
      yield [
        self.tornado_cassandra.execute(statement, parameters=params)
        for statement, params in statements_and_params
      ]
    except dbconstants.TRANSIENT_CASSANDRA_ERRORS:
      message = 'Unable to write large batch log'
      # Use self.logger consistently; a bare module-level "logger" is not
      # shown to be in scope for this method.
      self.logger.exception(message)
      raise BatchNotApplied(message)

    # Since failing after this point is expensive and time consuming, retry
    # operations to make a failure less likely.
    custom_retry_coroutine = retry_raw_coroutine(
      backoff_threshold=5, retrying_timeout=10,
      retry_on_exception=dbconstants.TRANSIENT_CASSANDRA_ERRORS)

    persistent_apply_batch = custom_retry_coroutine(large_batch.set_applied)
    try:
      yield persistent_apply_batch()
    except FailedBatch as batch_error:
      raise AppScaleDBConnectionError(str(batch_error))

    persistent_apply_mutations = custom_retry_coroutine(self.apply_mutations)
    try:
      yield persistent_apply_mutations(mutations, txn)
    except dbconstants.TRANSIENT_CASSANDRA_ERRORS:
      message = 'Exception during large batch'
      self.logger.exception(message)
      raise AppScaleDBConnectionError(message)

    try:
      yield large_batch.cleanup()
    except FailedBatch:
      # This should not raise an exception since the batch is already applied.
      self.logger.exception('Unable to clear batch status')

    clear_batch = (
      'DELETE FROM batches '
      'WHERE app = %(app)s AND transaction = %(transaction)s'
    )
    parameters = {'app': app, 'transaction': txn}
    try:
      yield self.tornado_cassandra.execute(clear_batch, parameters)
    except dbconstants.TRANSIENT_CASSANDRA_ERRORS:
      # Best effort: the batch is applied; leftover log rows are harmless
      # and can be removed later.
      self.logger.exception('Unable to clear batch log')