Example 1
    def QueryFollowers(cls,
                       client,
                       viewpoint_id,
                       excl_start_key=None,
                       limit=None):
        """Query followers belonging to the viewpoint (up to 'limit' total) for
    the specified 'viewpoint_id'. The query is for followers starting with
    (but excluding) 'excl_start_key'. Returns an array of follower objects
    and the last queried key.
    """
        # Query the viewpoint_id secondary index with excl_start_key & limit.
        query_expr = ('follower.viewpoint_id={id}', {'id': viewpoint_id})
        start_index_key = db_client.DBKey(
            excl_start_key,
            viewpoint_id) if excl_start_key is not None else None
        follower_keys = yield gen.Task(Follower.IndexQueryKeys,
                                       client,
                                       query_expr,
                                       start_index_key=start_index_key,
                                       limit=limit)

        last_key = follower_keys[-1].hash_key if len(
            follower_keys) > 0 else None

        followers = yield gen.Task(Follower.BatchQuery, client, follower_keys,
                                   None)

        raise gen.Return((followers, last_key))
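The (followers, last_key) pair is what makes paging possible: feeding the previous call's last_key back in as excl_start_key resumes the query just past the last follower already seen. Below is a minimal paging sketch, assuming QueryFollowers is exposed as a @classmethod coroutine on Viewpoint (as the yield/gen.Return style and Example 18 suggest); the helper name and the page size of 100 are illustrative only.

from tornado import gen

@gen.coroutine
def _ListAllFollowers(client, viewpoint_id):
  """Accumulates every follower of 'viewpoint_id', one page at a time."""
  all_followers = []
  last_key = None
  while True:
    followers, last_key = yield Viewpoint.QueryFollowers(
        client, viewpoint_id, excl_start_key=last_key, limit=100)
    all_followers.extend(followers)
    if last_key is None:
      break  # An empty page yields last_key=None, signalling the end.
  raise gen.Return(all_followers)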
Example 2
    def QueryEpisodes(cls,
                      client,
                      viewpoint_id,
                      callback,
                      excl_start_key=None,
                      limit=None):
        """Queries episodes belonging to the viewpoint (up to 'limit' total) for
    the specified 'viewpoint_id'. Starts with episodes having a key greater
    than 'excl_start_key'. Invokes the callback with a tuple of the episode
    array and the last queried key.
    """
        from viewfinder.backend.db.episode import Episode

        # Query the viewpoint_id secondary index with excl_start_key & limit.
        query_expr = ('episode.viewpoint_id={id}', {'id': viewpoint_id})
        start_index_key = db_client.DBKey(
            excl_start_key, None) if excl_start_key is not None else None
        episode_keys = yield gen.Task(Episode.IndexQueryKeys,
                                      client,
                                      query_expr,
                                      start_index_key=start_index_key,
                                      limit=limit)
        episodes = yield gen.Task(Episode.BatchQuery, client, episode_keys,
                                  None)
        callback(
            (episodes,
             episode_keys[-1].hash_key if len(episode_keys) > 0 else None))
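Unlike Example 1, this variant reports its result through an explicit callback rather than gen.Return. Inside a coroutine the usual way to consume it is gen.Task, which supplies the callback and yields whatever that callback receives, here the (episodes, last_key) tuple. A minimal sketch, assuming the same Viewpoint classmethod that Example 18 wraps in gen.Task; the limit of 50 is illustrative.

from tornado import gen

@gen.coroutine
def _FirstEpisodePage(client, viewpoint_id):
  """Fetches one page of episodes plus the key needed to resume the scan."""
  episodes, last_key = yield gen.Task(
      Viewpoint.QueryEpisodes, client, viewpoint_id, limit=50)
  raise gen.Return((episodes, last_key))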
Example 3
    def QueryFollowerIds(cls,
                         client,
                         viewpoint_id,
                         callback,
                         excl_start_key=None,
                         limit=None):
        """Query followers belonging to the viewpoint (up to 'limit' total) for
    the specified 'viewpoint_id'. The query is for followers starting with
    (but excluding) 'excl_start_key'. The callback is invoked with an array
    of follower user ids and the last queried key.
    """
        def _OnQueryFollowerKeys(follower_keys):
            follower_ids = [key.hash_key for key in follower_keys]
            last_key = follower_ids[-1] if len(follower_ids) > 0 else None

            callback((follower_ids, last_key))

        # Query the viewpoint_id secondary index with excl_start_key & limit.
        query_expr = ('follower.viewpoint_id={id}', {'id': viewpoint_id})
        start_index_key = db_client.DBKey(
            excl_start_key,
            viewpoint_id) if excl_start_key is not None else None
        Follower.IndexQueryKeys(client,
                                query_expr,
                                callback=_OnQueryFollowerKeys,
                                start_index_key=start_index_key,
                                limit=limit)
Example 4
  def Evaluate(self, client, callback, start_key, consistent_read, param_dict):
    """Queries the database for keys beginning with start_key, with a
    limit defined in the table schema. Consistent reads are disabled
    as they're unlikely to make a difference in search results (and
    are half as expensive in the DynamoDB cost model).
    """
    def _OnQuery(result):
      self._start_key = start_key
      self._last_key = result.last_key.range_key if result.last_key is not None else None
      self._matches = [_MatchResult(
          key=item['k'], data=self._Unpack(item.get('d', None))) for item in result.items]
      callback(_EvalResult(matches=self._matches, last_key=self._last_key,
                           read_units=result.read_units))

    if self._start_key and self._start_key <= start_key and \
          self._last_key and self._last_key > start_key:
      self._start_key = start_key
      self._matches = self._matches[
        bisect_right(self._matches, _MatchResult(key=start_key, data=None)):]
      callback(_EvalResult(matches=self._matches, last_key=self._last_key,
                           read_units=0))
    else:
      excl_start_key = db_client.DBKey(self._index_term, start_key) if start_key is not None else None
      client.Query(table=vf_schema.INDEX, hash_key=self._index_term,
                   range_operator=None, attributes=None, callback=_OnQuery,
                   limit=vf_schema.SCHEMA.GetTable(vf_schema.INDEX).scan_limit,
                   consistent_read=consistent_read, excl_start_key=excl_start_key)
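The else-branch issues a fresh DynamoDB query, but the if-branch reuses the previously cached window: when the requested start_key falls between _start_key and _last_key, bisect_right locates the first cached match past start_key and the list is simply sliced, costing zero read units. The standalone sketch below illustrates that slicing step on a plain sorted list; the MatchResult namedtuple and the keys are stand-ins rather than the module's actual _MatchResult.

from bisect import bisect_right
from collections import namedtuple

MatchResult = namedtuple('MatchResult', ['key', 'data'])

def slice_after(matches, start_key):
  """Drops every cached match whose key is <= start_key (matches sorted by key)."""
  return matches[bisect_right([m.key for m in matches], start_key):]

cached = [MatchResult('a1', None), MatchResult('b2', None), MatchResult('c3', None)]
print(slice_after(cached, 'b2'))   # [MatchResult(key='c3', data=None)]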
Example 5
  def QueryUsers(cls, client, viewer_user_id, user_ids, callback):
    """Queries User objects for each id in the 'user_ids' list. Invokes 'callback' with a list
    of (user, forward_friend, reverse_friend) tuples. Non-existent users are omitted.
    """
    user_keys = [db_client.DBKey(user_id, None) for user_id in user_ids]
    forward_friend_keys = [db_client.DBKey(viewer_user_id, user_id) for user_id in user_ids]
    reverse_friend_keys = [db_client.DBKey(user_id, viewer_user_id) for user_id in user_ids]
    users, forward_friends, reverse_friends = \
    yield [gen.Task(User.BatchQuery, client, user_keys, None, must_exist=False),
           gen.Task(Friend.BatchQuery, client, forward_friend_keys, None, must_exist=False),
           gen.Task(Friend.BatchQuery, client, reverse_friend_keys, None, must_exist=False)]

    user_friend_list = []
    for user, forward_friend, reverse_friend in zip(users, forward_friends, reverse_friends):
      if user is not None:
        user_friend_list.append((user, forward_friend, reverse_friend))

    callback(user_friend_list)
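Yielding the list of three gen.Task objects runs the user, forward-friend, and reverse-friend batch lookups concurrently rather than sequentially. The sketch below shows one way a caller might consume the resulting tuples; it assumes the method is exposed as a classmethod on User and is wrapped with gen.engine so the trailing callback argument is honored, and the viewer and user ids are made up.

from tornado import gen

@gen.coroutine
def _PrintFriendStatus(client, viewer_user_id):
  user_friend_list = yield gen.Task(
      User.QueryUsers, client, viewer_user_id, [10, 11, 12])
  for user, forward_friend, reverse_friend in user_friend_list:
    # Friend entries may be None because BatchQuery ran with must_exist=False.
    print(user.user_id, forward_friend is not None, reverse_friend is not None)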
Example 6
def RunOpOnTable(client, table, op, callback):
  """Runs the specified op on the table."""
  if options.options.hash_key and options.options.range_key:
    key = db_client.DBKey(eval(options.options.hash_key), eval(options.options.range_key))
  elif options.options.hash_key:
    key = db_client.DBKey(eval(options.options.hash_key), None)
  else:
    key = None

  def _OnOp(result):
    logging.info('%s: %s' % (table.name, repr(result)))
    callback()

  logging.info('executing %s on table %s' % (op, table.name))
  if op == 'create':
    client.CreateTable(table=table.name, hash_key_schema=table.hash_key_schema,
                       range_key_schema=table.range_key_schema,
                       read_units=table.read_units, write_units=table.write_units,
                       callback=_OnOp)
  elif op == 'describe':
    client.DescribeTable(table=table.name, callback=_OnOp)
  elif op == 'REALLY_DELETE_TABLE':
    client.DeleteTable(table=table.name, callback=_OnOp)
  elif op == 'get-item':
    client.GetItem(table=table.name, key=key, callback=_OnOp,
                   attributes=options.options.col_names)
  elif op == 'update-item':
    parser = AttrParser(table, raw=True)
    attrs = parser.Run(options.options.attributes)
    client.UpdateItem(table=table.name, key=key, callback=_OnOp,
                      attributes=attrs, return_values='ALL_NEW')
  elif op == 'delete-item':
    client.DeleteItem(table=table.name, key=key, callback=_OnOp,
                      return_values='ALL_OLD')
  elif op == 'delete-range':
    assert table.range_key_col, 'Table %s is not composite' % table.name
    DeleteRange(client, table, _OnOp)
  elif op == 'query':
    assert table.range_key_col, 'Table %s is not composite' % table.name
    QueryRange(client, table, _OnOp)
  elif op == 'scan':
    Scan(client, table, _OnOp)
  else:
    raise Exception('unrecognized op: %s' % op)
Example 7
        def _OnQueryIndexTerms(term_updates, result):
            old_dict = result.attributes or {}
            term_attrs = {}
            add_terms = {}  # dict of term dicts by term key
            del_terms = []  # list of term keys
            for name, update in term_updates.items():
                key = self._table.GetColumn(name).key + ':t'
                terms = set(update.value.keys()) if update.value else set()

                # Special check here; you cannot 'PUT' an empty set. Must 'DELETE'.
                if update.action == 'PUT' and not terms:
                    term_attrs[key] = db_client.UpdateAttr(value=None,
                                                           action='DELETE')
                else:
                    term_attrs[key] = db_client.UpdateAttr(
                        value=list(terms), action=update.action)

                # Compute which index terms to add and which to delete.
                if update.action == 'PUT':
                    old_terms = set(old_dict.get(key, []))
                    add_terms.update(
                        dict([(t, update.value[t])
                              for t in terms.difference(old_terms)]))
                    del_terms += old_terms.difference(terms)
                elif update.action == 'ADD':
                    add_terms.update(update.value)
                elif update.action == 'DELETE':
                    del_terms += terms

            # Add and delete all terms as necessary.
            with util.Barrier(partial(_OnUpdateIndexTerms, term_attrs)) as b:
                index_key = self._GetIndexKey()
                for term, data in add_terms.items():
                    attrs = {'d': data} if data else {}
                    client.PutItem(table=vf_schema.INDEX,
                                   callback=b.Callback(),
                                   attributes=attrs,
                                   key=db_client.DBKey(hash_key=term,
                                                       range_key=index_key))
                for term in del_terms:
                    client.DeleteItem(table=vf_schema.INDEX,
                                      callback=b.Callback(),
                                      key=db_client.DBKey(hash_key=term,
                                                          range_key=index_key))
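For a 'PUT' update the code diffs the new term set against whatever was previously stored under the column's ':t' attribute: only newly appearing terms get PutItem calls and only terms that disappeared get DeleteItem calls, so index writes scale with the change rather than with the full term list. A tiny standalone sketch of that diff, with made-up terms and data:

old_terms = {'alice', 'smith'}                 # previously indexed terms
new_value = {'alice': None, 'jones': 'd1'}     # new term -> freighted data

new_terms = set(new_value.keys())
add_terms = {t: new_value[t] for t in new_terms - old_terms}   # {'jones': 'd1'}
del_terms = list(old_terms - new_terms)                        # ['smith']
print(add_terms, del_terms)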
Example 8
 def _GetData(self, table, hash_key, range_key, callback):
     """Fetch a single entry."""
     item = yield gen.Task(self._client.GetItem,
                           table=table.name,
                           key=db_client.DBKey(hash_key, range_key),
                           attributes=[c.key for c in table.GetColumns()],
                           must_exist=False)
     if item is None:
         callback([], 0, None)
     else:
         callback([item.attributes], 1, None)
Example 9
 def _OnQuery(retry_cb, count, result):
   count += len(result.items)
   if not result.last_key:
     result_cb = partial(callback, 'deleted %d items' % count)
   else:
     logging.info('deleting next %d items from %s' % (len(result.items), table.name))
     result_cb = partial(retry_cb, result.last_key, count)
   with util.Barrier(result_cb) as b:
     for item in result.items:
       key = db_client.DBKey(options.options.hash_key, item[table.range_key_col.key])
       client.DeleteItem(table=table.name, key=key, callback=b.Callback())
Example 10
  def RangeQuery(cls, client, hash_key, range_desc, limit, col_names, callback,
                 excl_start_key=None, consistent_read=False, count=False, scan_forward=True):
    """Executes a range query using the predicate contained in 'range_desc'
    to select a set of items. If 'limit' is not None, then the database will
    be queried until 'limit' items have been fetched, or until there are no
    more items to fetch. If 'limit' is None, then the first page of results
    is returned (i.e. whatever DynamoDB returns).

    'range_desc' is a tuple of ([range_key], ('EQ'|'LE'|'LT'|'GE'|'GT'|'BEGINS_WITH')),
    --or-- ([range_start_key, range_end_key], 'BETWEEN').

    If 'excl_start_key' is not of type DBKey, assumes that 'excl_start_key'
    only specifies the range key and builds an appropriate DBKey object using
    hash_key to feed to the db client interface.

    On completion, invokes callback with a list of queried objects. If
    count is True, invokes callback with count.
    """
    if limit == 0:
      assert not count
      callback([])
      return

    if not count:
      col_set = cls._CreateColumnSet(col_names)
      attrs = [cls._table.GetColumn(name).key for name in col_set]
    else:
      attrs = None

    if excl_start_key is not None and not isinstance(excl_start_key, db_client.DBKey):
      excl_start_key = db_client.DBKey(hash_key, excl_start_key)

    instance_count = 0
    instances = []
    while True:
      remaining = limit - instance_count if limit is not None else None
      query_result = yield gen.Task(client.Query, table=cls._table.name, hash_key=hash_key,
                                    range_operator=range_desc, attributes=attrs,
                                    limit=remaining, consistent_read=consistent_read,
                                    count=count, excl_start_key=excl_start_key, scan_forward=scan_forward)

      instance_count += query_result.count
      if not count:
        for item in query_result.items:
          instance = cls._CreateFromQuery(**item)
          instances.append(instance)

      assert limit is None or instance_count <= limit, (limit, instance_count)
      if query_result.last_key is None or limit is None or instance_count == limit:
        callback(instance_count if count else instances)
        break

      excl_start_key = query_result.last_key
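The loop keeps issuing Query calls, feeding each page's last_key back in as excl_start_key, until 'limit' items have been collected or the table is exhausted (with limit=None only the first page is fetched). A usage sketch follows, assuming the classmethod lives on Post as in Example 24 and is wrapped with gen.engine so gen.Task can drive its callback; the episode id and the photo-id bounds are placeholders.

from tornado import gen

@gen.coroutine
def _PostsBetween(client, episode_id, first_photo_id, last_photo_id):
  range_desc = db_client.RangeOperator([first_photo_id, last_photo_id], 'BETWEEN')
  posts = yield gen.Task(Post.RangeQuery,
                         client,
                         episode_id,
                         range_desc,
                         limit=None,       # None: return just the first page
                         col_names=None)   # None: fetch all columns
  raise gen.Return(posts)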
Example 11
    def _ParseIndexKey(cls, index_key):
        """Returns the object's key by parsing the index key. This is
    symmetric with _MakeIndexKey, and is used to extract the actual
    object key from results of index queries. By default, returns the
    unadulterated index_key.

    Because all keys are stored in the index table as strings, if the
    hash key column type is a number, convert here from a string to a
    number.
    """
        if cls._table.hash_key_col.value_type == 'N':
            index_key = int(index_key)
        return db_client.DBKey(hash_key=index_key, range_key=None)
Example 12
 def _OnQueryIndexTerms(get_result):
     terms = [
         t for term_set in get_result.attributes.values()
         for t in term_set
     ]
     with util.Barrier(_OnDeleteIndexTerms) as b:
         index_key = self._GetIndexKey()
         [
             client.DeleteItem(table=vf_schema.INDEX,
                               key=db_client.DBKey(hash_key=term,
                                                   range_key=index_key),
                               callback=b.Callback()) for term in terms
         ]
Example 13
def _QueryFollowedForArchive(client, user_id):
    """Queries all viewpoints followed by the requested user (excluding the default/personal viewpoint)."""
    followed = yield gen.Task(Followed.RangeQuery,
                              client,
                              hash_key=user_id,
                              range_desc=None,
                              limit=None,
                              col_names=['viewpoint_id'],
                              excl_start_key=None)

    # Get the viewpoint associated with each follower object.
    viewpoint_keys = [db_client.DBKey(f.viewpoint_id, None) for f in followed]
    follower_keys = [
        db_client.DBKey(user_id, f.viewpoint_id) for f in followed
    ]
    viewpoints, followers = yield [
        gen.Task(Viewpoint.BatchQuery,
                 client,
                 viewpoint_keys,
                 None,
                 must_exist=False),
        gen.Task(Follower.BatchQuery,
                 client,
                 follower_keys,
                 None,
                 must_exist=False)
    ]

    # Formulate the viewpoints list into a dict for JSON output.
    response = {
        'viewpoints': [
            _MakeViewpointMetadataDict(v, f)
            for v, f in zip(viewpoints, followers)
            if v is not None and not v.IsDefault()
        ]
    }

    raise gen.Return(response)
Example 14
 def Query(cls,
           client,
           hash_key,
           col_names,
           callback,
           must_exist=True,
           consistent_read=False):
     """Queries a object by primary hash key."""
     cls.KeyQuery(client,
                  key=db_client.DBKey(hash_key=hash_key, range_key=None),
                  col_names=col_names,
                  callback=callback,
                  must_exist=must_exist,
                  consistent_read=consistent_read)
Example 15
  def AllocateAssetIds(cls, client, user_id, num_ids, callback):
    """Allocates 'num_ids' new ids from the 'asset_id_seq' column in a
    block for the specified user id and returns the first id in the
    sequence (first_id, ..., first_id + num_ids] with the callback
    """
    id_seq_key = cls._table.GetColumn('asset_id_seq').key

    def _OnUpdateIdSeq(result):
      last_id = result.return_values[id_seq_key]
      first_id = last_id - num_ids
      callback(first_id)

    client.UpdateItem(table=cls._table.name,
                      key=db_client.DBKey(hash_key=user_id, range_key=None),
                      attributes={id_seq_key: db_client.UpdateAttr(value=num_ids, action='ADD')},
                      return_values='UPDATED_NEW', callback=_OnUpdateIdSeq)
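Because the 'ADD' action is atomic in DynamoDB, concurrent callers can never be handed overlapping blocks; each one observes a distinct UPDATED_NEW value. A small worked example of the arithmetic, with made-up numbers:

# If the stored asset_id_seq was 100 and num_ids is 5, the atomic ADD makes
# it 105; result.return_values reports last_id = 105 and the callback gets
# first_id = 105 - 5 = 100, i.e. the block (100, ..., 105] from the docstring.
num_ids = 5
last_id = 100 + num_ids        # value of UPDATED_NEW after the ADD
first_id = last_id - num_ids   # 100, passed to the callback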
Example 16
 def _ParseIndexKey(cls, index_key):
   """Returns a tuple representing the object's composite key by
   parsing the provided index key. This is symmetric with
   _MakeIndexKey, and is used to extract the actual object key from
   results of index queries.
   """
   colon_loc = index_key.find(':')
   assert colon_loc != -1, index_key
   hash_key_len = int(index_key[:colon_loc])
   index_key = index_key[colon_loc + 1:]
   hash_key = index_key[:hash_key_len]
   range_key = index_key[hash_key_len:]
   if cls._table.hash_key_col.value_type == 'N':
     hash_key = int(hash_key)
   if cls._table.range_key_col.value_type == 'N':
     range_key = int(range_key)
   return db_client.DBKey(hash_key=hash_key, range_key=range_key)
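The length prefix exists because the hash and range keys are concatenated into a single index string; without it the boundary between the two could not be recovered. The sketch below shows the encoding that this parser implies for its _MakeIndexKey counterpart; it is an inference from _ParseIndexKey, not the project's actual implementation.

def _MakeIndexKeySketch(db_key):
  """Hypothetical inverse of _ParseIndexKey: '<len(hash_key)>:<hash_key><range_key>'."""
  hash_key, range_key = str(db_key.hash_key), str(db_key.range_key)
  return '%d:%s%s' % (len(hash_key), hash_key, range_key)

# _ParseIndexKey('2:42ep123') would then recover hash_key '42' (converted to
# the int 42 for a numeric hash key column) and range_key 'ep123'.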
Example 17
def Scan(client, table, callback):
    """Scans an entire table.
  """
    found_entries = {}
    for prefix in options.options.hash_key_prefixes:
        found_entries[prefix] = 0
    deleted = 0
    last_key = None
    count = 0
    while True:
        result = yield gen.Task(client.Scan,
                                table.name,
                                attributes=None,
                                limit=50,
                                excl_start_key=last_key)
        count += len(result.items)

        for item in result.items:
            value = item.get('t', None)
            sort_key = item.get('k', None)
            if value is None or sort_key is None:
                continue
            for prefix in options.options.hash_key_prefixes:
                if value.startswith(prefix):
                    logging.info('matching item: %r' % item)
                    found_entries[prefix] += 1
                    if options.options.delete:
                        logging.info('deleting item: %r' % item)
                        yield gen.Task(client.DeleteItem,
                                       table=table.name,
                                       key=db_client.DBKey(value, sort_key))
                        deleted += 1
        if result.last_key:
            last_key = result.last_key
        else:
            break

    logging.info('Found entries: %r' % found_entries)
    logging.info('scanned %d items, deleted %d' % (count, deleted))
    callback()
Example 18
def _QueryViewpointsForArchive(client,
                               user_id,
                               viewpoint_ids,
                               get_followers=False,
                               get_activities=False,
                               get_episodes=False,
                               get_comments=False,
                               get_attributes=False):
    """Queries viewpoint metadata, as well as associated followers and episodes.
  """
    @gen.coroutine
    def _QueryFollowers():
        """Produces list of (followers, last_key) tuples, one for each viewpoint in the request."""
        tasks = []
        for vp_id in viewpoint_ids:
            if get_followers:
                tasks.append(Viewpoint.QueryFollowers(client, vp_id))
            else:
                tasks.append(util.GenConstant(None))

        follower_results = yield tasks
        raise gen.Return(follower_results)

    @gen.coroutine
    def _QueryActivities():
        """Produces list of (activities, last_key) tuples, one for each viewpoint in the request."""
        tasks = []
        for vp_id in viewpoint_ids:
            if get_activities:
                tasks.append(gen.Task(Viewpoint.QueryActivities, client,
                                      vp_id))
            else:
                tasks.append(util.GenConstant(None))

        activity_results = yield tasks
        raise gen.Return(activity_results)

    @gen.coroutine
    def _QueryEpisodes():
        """Produces list of (episodes, last_key) tuples, one for each viewpoint in the request."""
        tasks = []
        for vp_id in viewpoint_ids:
            if get_episodes:
                tasks.append(gen.Task(Viewpoint.QueryEpisodes, client, vp_id))
            else:
                tasks.append(util.GenConstant(None))

        episode_results = yield tasks
        raise gen.Return(episode_results)

    @gen.coroutine
    def _QueryComments():
        """Produces list of (comments, last_key) tuples, one for each viewpoint in the request."""
        tasks = []
        for vp_id in viewpoint_ids:
            if get_comments:
                tasks.append(gen.Task(Viewpoint.QueryComments, client, vp_id))
            else:
                tasks.append(util.GenConstant(None))

        comment_results = yield tasks
        raise gen.Return(comment_results)

    viewpoint_keys = [db_client.DBKey(vp_id, None) for vp_id in viewpoint_ids]
    follower_keys = [
        db_client.DBKey(user_id, vp_id) for vp_id in viewpoint_ids
    ]

    results = yield [
        gen.Task(Viewpoint.BatchQuery,
                 client,
                 viewpoint_keys,
                 None,
                 must_exist=False),
        gen.Task(Follower.BatchQuery,
                 client,
                 follower_keys,
                 None,
                 must_exist=False),
        _QueryFollowers(),
        _QueryActivities(),
        _QueryEpisodes(),
        _QueryComments()
    ]

    viewpoints, followers, follower_id_results, activity_results, episode_results, comment_results = results
    zip_list = zip(viewpoints, followers, follower_id_results,
                   activity_results, episode_results, comment_results)

    response_vp_dicts = []
    for viewpoint, follower, follower_result, activity_result, episode_result, comment_result in zip_list:
        # Only return the viewpoint metadata if the caller is a follower of the viewpoint.
        if follower is not None and not follower.IsRemoved():
            response_vp_dict = {'viewpoint_id': viewpoint.viewpoint_id}

            if get_attributes:
                response_vp_dict.update(
                    _MakeViewpointMetadataDict(viewpoint, follower))

            if get_followers:
                followers, last_key = follower_result
                response_vp_dict['followers'] = [
                    foll.MakeFriendMetadataDict() for foll in followers
                ]
                if last_key is not None:
                    response_vp_dict[
                        'follower_last_key'] = www_util.FormatIntegralLastKey(
                            last_key)

            if _CanViewViewpointContent(viewpoint, follower):
                if get_activities:
                    activities, last_key = activity_result
                    response_vp_dict['activities'] = [
                        act.MakeMetadataDict() for act in activities
                    ]
                    if last_key is not None:
                        response_vp_dict['activity_last_key'] = last_key

                if get_episodes:
                    episodes, last_key = episode_result
                    response_vp_dict['episodes'] = [
                        ep._asdict() for ep in episodes
                    ]
                    if last_key is not None:
                        response_vp_dict['episode_last_key'] = last_key

                if get_comments:
                    comments, last_key = comment_result
                    response_vp_dict['comments'] = [
                        co._asdict() for co in comments
                    ]
                    if last_key is not None:
                        response_vp_dict['comment_last_key'] = last_key

            response_vp_dicts.append(response_vp_dict)

    raise gen.Return({'viewpoints': response_vp_dicts})
Example 19
def UpgradeTable(client, table, callback):
    """Sequentially scans 'table', updating each scan item to trigger
  necessary upgrades.
  """
    upgrade_versions = []
    if options.options.migrator:
        upgrade_versions.append(getattr(versions, options.options.migrator))
    else:
        raise Exception('Upgrade requires the --migrator option.')
    if not db_import.GetTableClass(table.name):
        raise Exception('Upgrade is not supported on table %s.' % table.name)

    # Get full count of rows in the database for logging progress.
    describe = yield gen.Task(client.DescribeTable, table=table.name)
    logging.info('%s: (%d items)' % (table.name, describe.count))
    if options.options.excl_start_key:
        excl_start_key = db_client.DBKey(options.options.excl_start_key, None)
    else:
        excl_start_key = None

    # Loop while scanning in batches.  Scan will have already updated the
    # items if it was needed, so there is no need to call MaybeMigrate
    # again. If 'last_key' is None, the scan is complete.
    # Otherwise, continue with scan by supplying the last key as the exclusive start key.
    count = 0
    scan_params = {
        'client': client,
        'col_names': None,
        'limit': options.options.scan_limit,
        'excl_start_key': excl_start_key
    }
    while True:
        items, last_key = yield gen.Task(
            db_import.GetTableClass(table.name).Scan, **scan_params)

        # Maybe migrate all items.
        yield [
            gen.Task(Version.MaybeMigrate, client, item, upgrade_versions)
            for item in items
        ]

        logging.info('scanned next %d items from table %s' %
                     (len(items), table.name))

        new_count = count + len(items)
        logging.info('processed a total of %d items from table %s' %
                     (new_count, table.name))
        # Log a progress notification every 10% scanned.
        if describe.count and (new_count * 10) / describe.count > (
                count * 10) / describe.count:
            logging.info('%s: %d%%%s' %
                         (table.name, (new_count * 100) / describe.count,
                          '...' if new_count != describe.count else ''))
        if last_key is None:
            break
        elif options.options.upgrade_limit and new_count >= int(
                options.options.upgrade_limit):
            logging.info('exhausted --upgrade_limit=%s; exiting...' %
                         options.options.upgrade_limit)
            break

        # Prepare for next iteration of loop.
        scan_params['excl_start_key'] = last_key
        count = new_count

    callback()
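The progress message is throttled to roughly once per 10% of the table: under Python 2 the '/' operator is integer division, so the two divisions in the condition only differ when new_count crosses a decile boundary. A standalone sketch of that check, written with '//' so it behaves identically on Python 3:

def crossed_decile(count, new_count, total):
  """True when the scan has just crossed a 10% boundary of 'total'."""
  return bool(total) and (new_count * 10) // total > (count * 10) // total

print(crossed_decile(95, 105, 1000))   # True: 9% -> 10%
print(crossed_decile(105, 115, 1000))  # False: still inside the same decile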
Example 20
 def GetKey(self):
   """Returns the object's composite (hash, range) key."""
   return db_client.DBKey(
     hash_key=self._columns[self._table.hash_key_col.name].Get(),
     range_key=self._columns[self._table.range_key_col.name].Get())
Example 21
 def Query(cls, client, hash_key, range_key, col_names, callback,
           must_exist=True, consistent_read=False):
   """Queries a object by composite hash/range key."""
   cls.KeyQuery(client, key=db_client.DBKey(hash_key=hash_key, range_key=range_key),
                col_names=col_names, callback=callback, must_exist=must_exist,
                consistent_read=consistent_read)
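Examples 14 and 21 are the two flavors of primary-key lookup: a hash-only key (range_key=None) versus a composite hash/range key. A usage sketch for the composite case, assuming Follower rows are keyed by (user_id, viewpoint_id) as the other examples construct them, with gen.Task driving the callback:

from tornado import gen

@gen.coroutine
def _GetFollower(client, user_id, viewpoint_id):
  follower = yield gen.Task(Follower.Query,
                            client,
                            hash_key=user_id,
                            range_key=viewpoint_id,
                            col_names=None,
                            must_exist=False)  # presumably yields None when missing
  raise gen.Return(follower)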
Example 22
 def _OnGetKeys(items):
     rows = [self._FormatResult(item) for item in items]
     last_marker = items[-1] if len(items) > 0 else ''
     self.WriteTablePageResponse(rows,
                                 db_client.DBKey(last_marker, None))
Example 23
def RunDBA(callback):
  """Runs op on each table listed in --tables."""
  client = db_client.DBClient.Instance()
  op = options.options.op

  table = None
  if options.options.table:
    table = vf_schema.SCHEMA.GetTable(options.options.table)
    assert table, 'unrecognized table name: %s' % options.options.table
    cls = db_import.GetTableClass(table.name)

  key = None
  if options.options.hash_key and options.options.range_key:
    assert table.range_key_col
    key = db_client.DBKey(eval(options.options.hash_key), eval(options.options.range_key))
  elif options.options.hash_key:
    assert not table.range_key_col
    key = db_client.DBKey(eval(options.options.hash_key), None)

  start_key = eval(options.options.start_key) if options.options.start_key else None
  end_key = eval(options.options.end_key) if options.options.end_key else None
  range_desc = None
  if start_key and end_key:
    range_desc = db_client.RangeOperator([start_key, end_key], 'BETWEEN')
  elif start_key:
    range_desc = db_client.RangeOperator([start_key], 'GT')
  elif end_key:
    range_desc = db_client.RangeOperator([end_key], 'LT')

  user_id = None
  if options.options.user_id:
    user_id = eval(options.options.user_id)

  limit = None
  if options.options.limit:
    limit = eval(options.options.limit)

  if options.options.attributes:
    parser = AttrParser(table)
    attrs = parser.Run(options.options.attributes)
    if table and key:
      attrs[table.hash_key_col.name] = key.hash_key
      if key.range_key:
        attrs[table.range_key_col.name] = key.range_key
    logging.info('attributes: %s' % attrs)

  def _OnOp(*args, **kwargs):
    if args:
      logging.info('positional result args: %s' % pprint.pformat(args))
    if kwargs:
      logging.info('keyword result args: %s' % pprint.pformat(kwargs))
    callback()

  if op in ('query', 'update'):
    assert table, 'no table name specified for operation'
  if op == 'query':
    assert table.range_key_col, 'Table %s is not composite' % table.name

  # Run the operation
  logging.info('executing %s' % op)
  if op == 'query':
    QueryRange(client, table, cls, key, range_desc, _OnOp)
  elif op == 'update':
    o = cls()
    o.UpdateFromKeywords(**attrs)
    o.Update(client, _OnOp)
  else:
    raise Exception('unrecognized op: %s' % op)
Example 24
def _QueryEpisodesForArchive(client, obj_store, user_id, episode_ids):
    """Queries posts from the specified episodes.
  """
    def _MakePhotoDict(post, photo, user_post, user_photo):
        ph_dict = photo.MakeMetadataDict(post, user_post, user_photo)

        # Do not return access URLs for posts which have been removed.
        if not post.IsRemoved():
            ph_dict['full_get_url'] = photo_store.GeneratePhotoUrl(
                obj_store, ph_dict['photo_id'], '.f')

        return ph_dict

    # Get all requested episodes, along with posts for each episode.
    episode_keys = [db_client.DBKey(ep_id, None) for ep_id in episode_ids]

    post_tasks = []
    for ep_id in episode_ids:
        post_tasks.append(
            gen.Task(Post.RangeQuery,
                     client,
                     ep_id,
                     None,
                     None,
                     None,
                     excl_start_key=None))

    episodes, posts_list = yield [
        gen.Task(Episode.BatchQuery,
                 client,
                 episode_keys,
                 None,
                 must_exist=False),
        gen.Multi(post_tasks)
    ]

    # Get viewpoint records for all viewpoints containing episodes.
    viewpoint_keys = [
        db_client.DBKey(viewpoint_id, None) for viewpoint_id in set(
            ep.viewpoint_id for ep in episodes if ep is not None)
    ]

    # Get follower records for all viewpoints containing episodes, along with photo and user post objects.
    follower_keys = [
        db_client.DBKey(user_id, db_key.hash_key) for db_key in viewpoint_keys
    ]

    all_posts = [
        post for posts in posts_list if posts is not None for post in posts
    ]
    photo_keys = [db_client.DBKey(post.photo_id, None) for post in all_posts]
    user_post_keys = [
        db_client.DBKey(user_id,
                        Post.ConstructPostId(post.episode_id, post.photo_id))
        for post in all_posts
    ]
    if user_id:
        # TODO(ben): we can probably skip this for the web view
        user_photo_task = gen.Task(
            UserPhoto.BatchQuery,
            client,
            [db_client.DBKey(user_id, post.photo_id) for post in all_posts],
            None,
            must_exist=False)
    else:
        user_photo_task = util.GenConstant(None)

    viewpoints, followers, photos, user_posts, user_photos = yield [
        gen.Task(Viewpoint.BatchQuery,
                 client,
                 viewpoint_keys,
                 None,
                 must_exist=False),
        gen.Task(Follower.BatchQuery,
                 client,
                 follower_keys,
                 None,
                 must_exist=False),
        gen.Task(Photo.BatchQuery, client, photo_keys, None),
        gen.Task(UserPost.BatchQuery,
                 client,
                 user_post_keys,
                 None,
                 must_exist=False),
        user_photo_task,
    ]

    # Get set of viewpoint ids to which the current user has access.
    viewable_viewpoint_ids = set(
        viewpoint.viewpoint_id
        for viewpoint, follower in zip(viewpoints, followers)
        if _CanViewViewpointContent(viewpoint, follower))

    response_dict = {'episodes': []}

    for ep_id, episode, posts in zip(episode_ids, episodes, posts_list):
        # Gather list of (post, photo, user_post) tuples for this episode.
        photo_info_list = []
        for post in posts:
            photo = photos.pop(0)
            user_post = user_posts.pop(0)
            user_photo = user_photos.pop(
                0) if user_photos is not None else None
            assert photo.photo_id == post.photo_id, (episode, post, photo)
            if user_photo:
                assert user_photo.photo_id == photo.photo_id
                assert user_photo.user_id == user_id
            photo_info_list.append((post, photo, user_post, user_photo))

        if episode is not None and episode.viewpoint_id in viewable_viewpoint_ids:
            response_ep_dict = {'episode_id': ep_id}

            response_ep_dict.update(episode._asdict())

            response_ep_dict['photos'] = [
                _MakePhotoDict(post, photo, user_post, user_photo)
                for post, photo, user_post, user_photo in photo_info_list
            ]
            if len(photo_info_list) > 0:
                response_ep_dict['last_key'] = photo_info_list[-1][0].photo_id

            response_dict['episodes'].append(response_ep_dict)

    raise gen.Return(response_dict)
Example 25
 def GetKey(self):
     """Returns the object's primary hash key."""
     return db_client.DBKey(
         hash_key=self._columns[self._table.hash_key_col.name].Get(),
         range_key=None)
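Taken together, the examples construct DBKey both positionally and with hash_key=/range_key= keywords, and read .hash_key and .range_key back from it, so it behaves like a plain two-field record. The stand-in below has the same surface and is handy for experimenting outside the real db_client module; it is an assumption, not the actual definition.

from collections import namedtuple

DBKey = namedtuple('DBKey', ['hash_key', 'range_key'])

key = DBKey('u123', None)                              # hash-only key, as in Example 25
composite = DBKey(hash_key='u123', range_key='vp456')  # composite key, as in Example 20
print(key.hash_key, composite.range_key)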