コード例 #1
0
ファイル: redis_engine.py プロジェクト: bossjones/taba
  def _ShardCheckAndSet(self, shard, keys, vkeys, values):
    """Perform a Check-and-Set over the given keys on a single shard, in
    batches of CAS_BATCH_SIZE keys, running at most CAS_PARALLEL_REQUESTS
    batches concurrently.

    Args:
      shard - Shard client object on which to execute the operation.
      keys - List of keys to check-and-set.
      vkeys - List of virtual keys corresponding to 'keys'.
      values - List of values corresponding to 'keys'.

    Returns:
      A CompoundOperation with one sub-operation per batch. Its
      response_value is the concatenation of the response_values of the
      successful sub-operations only.
    """
    op = CompoundOperation()

    # Split the request into batches to avoid locking too many keys at once.
    greenlet_args = [(
            shard,
            keys[i:i + CAS_BATCH_SIZE],
            vkeys[i:i + CAS_BATCH_SIZE],
            values[i:i + CAS_BATCH_SIZE])
        for i in xrange(0, len(keys), CAS_BATCH_SIZE)]

    # Spawn the batches in waves of CAS_PARALLEL_REQUESTS, waiting for each
    # wave to finish before starting the next, to bound concurrent load.
    greenlets = []
    for j in xrange(0, len(greenlet_args), CAS_PARALLEL_REQUESTS):
      greenlet_batch = [
          gevent.spawn(self._ShardCheckAndSetBatch, *args)
          for args in greenlet_args[j:j + CAS_PARALLEL_REQUESTS]]

      greenlets.extend(greenlet_batch)
      gevent.joinall(greenlet_batch)

    # Get the results and combine them into a single Operation. (A plain loop:
    # a list comprehension here would build a pointless throwaway list.)
    for greenlet in greenlets:
      op.AddOp(greenlet.get())

    op.response_value = []
    for sub_op in op.sub_operations:
      if sub_op.success:
        op.response_value.extend(sub_op.response_value)

    return op
コード例 #2
0
ファイル: redis_engine.py プロジェクト: bossjones/taba
    def _ShardGet(shard, keys, vkeys, values):
      """Fetch the values for 'vkeys' from a single shard via batched MGET.

      NOTE: 'batch_size' is a free variable taken from the enclosing scope.

      Returns:
        A CompoundOperation with one successful sub-operation per MGET batch;
        its response_value is the flattened list of fetched values in request
        order.
      """
      op = CompoundOperation()
      for i in xrange(0, len(vkeys), batch_size):
        # Pass the slice directly -- re-copying it through an identity
        # comprehension ([vkey for vkey in ...]) was pure overhead.
        response_value = shard.mget(vkeys[i:i + batch_size])
        op.AddOp(Operation(success=True, response_value=response_value))

      op.response_value = list(itertools.chain.from_iterable(
          (sub_op.response_value for sub_op in op.sub_operations)))
      return op
コード例 #3
0
ファイル: redis_engine.py プロジェクト: bossjones/taba
  def _ShardedOp(self, key_value_tuples, shard_execute_fn):
    """Perform an operation on the necessary shards in the cluster, and
    aggregate the results. The callback will be invoked for each shard, with
    the keys, virtual keys, and values that belong to that shard.

    Args:
      key_value_tuples - List of tuples of (key, value) for the operation.
      shard_execute_fn - Callback for executing the desired operation in each
          shard. The signature is f(shard, keys, vkeys, values) => Operation

    Returns:
      A CompoundOperation with the results of the queries. The response_value
      field is the combined response_value of each sub-operation (assuming all
      the sub-operations succeeded and returned a response_value)
    """
    # Compute the shard info for all the keys, and group them by the shard in
    # which they reside. The original request position 'i' is carried along
    # with each tuple so the per-shard responses can be sorted back into
    # request order at the end.
    vkeys_by_shard = defaultdict(list)
    for i, (key, val) in enumerate(key_value_tuples):
      (shard_num, _, vkey) = self._GetShardInfo(key)
      vkeys_by_shard[shard_num].append((i, key, vkey, val))

    # Split the request for each shard into a separate greenlet to allow the
    # requests to happen in parallel. 'op' and 'responses' are shared closure
    # state, mutated by each greenlet.
    op = CompoundOperation()
    responses = []

    def _ShardGreenletWrapper(shard_num, vkey_tuples):
      # Transpose [(i, key, vkey, val), ...] into four parallel lists.
      indices, keys, vkeys, values = [list(i) for i in zip(*vkey_tuples)]

      try:
        sub_op = shard_execute_fn(self.shards[shard_num], keys, vkeys, values)
      except ConnectionError:
        # Count connection failures per shard for monitoring; otherwise the
        # failure is recorded the same way as any other exception below.
        client.Counter('redis_connection_error_shard_%s' % shard_num)
        sub_op = Operation(success=False, traceback=traceback.format_exc())
      except Exception:
        sub_op = Operation(success=False, traceback=traceback.format_exc())

      # Aggregate the results. NOTE(review): mutating shared 'op'/'responses'
      # from multiple greenlets relies on gevent's cooperative scheduling
      # (no yield inside AddOp/extend) -- confirm if those ever block.
      op.AddOp(sub_op)
      if sub_op.response_value:
        responses.extend(zip(indices, sub_op.response_value))

    greenlets = [
        gevent.spawn(_ShardGreenletWrapper, shard_num, vkey_tuples)
        for shard_num, vkey_tuples in vkeys_by_shard.iteritems()]

    # Block until every per-shard greenlet has completed.
    gevent.joinall(greenlets)

    # Sort the combined responses back into the request order.
    responses = sorted(responses, key=lambda r: r[0])
    op.response_value = [r[1] for r in responses]

    return op