Example #1
0
def network_get_all_by_host(host):
    """Return all networks associated with the given host.

    A network matches when its own ``host`` column equals *host*, or when
    one of its fixed IPs is tied to the host — either directly or through
    the instance the fixed IP is attached to.
    """
    # Fixed IPs on the host, directly or via their instance.
    # NOTE: SQLAlchemy needs ``!= None`` here, not ``is not None``.
    ip_on_host = or_(
        models.FixedIp.host == host,
        and_(models.FixedIp.instance_uuid != None,
             models.Instance.host == host),
    )
    network_ids = (
        Query(models.FixedIp.network_id)
        .outerjoin((models.Instance,
                    models.Instance.uuid == models.FixedIp.instance_uuid))
        .filter(ip_on_host)
    )
    # NOTE(vish): return networks that have host set
    #             or that have a fixed ip with host set
    #             or that have an instance with host set
    match_host = or_(models.Network.host == host,
                     models.Network.id.in_(network_ids.subquery()))
    return _network_get_query().filter(match_host).all()
Example #2
0
    def test_expression_and_or(self):
        """Checks on a specified attribute with operators "IN"."""

        # Checks several examples with "and" and "or" operators.  A fresh
        # expression object is built for each case so every evaluation is
        # independent of the previous ones, exactly as in separate tests.
        cases = [
            ({"label": "network_3", "multi_host": False},
             True, "complex expression (1)"),
            ({"label": "network_2", "multi_host": True},
             True, "complex expression (2)"),
            ({"label": "network_2", "multi_host": False},
             False, "complex expression (3)"),
        ]
        for row, expected, message in cases:
            expression = BooleanExpression(
                "NORMAL",
                or_(and_(models.Network.label != "network_3",
                         models.Network.multi_host == True),
                    models.Network.label == "network_3"))
            value = expression.evaluate(KeyedTuple([row], ["networks"]))
            if expected:
                self.assertTrue(value, message)
            else:
                self.assertFalse(value, message)
Example #3
0
def _image_member_find(context,
                       session,
                       image_id=None,
                       member=None,
                       status=None,
                       include_deleted=False):
    """Look up image-membership rows matching the given criteria.

    Non-admin callers only see memberships on images they own or rows on
    which they are the member.  Returns the list of matching rows.
    """
    query = session.query(models.ImageMember)
    if not include_deleted:
        query = query.filter_by(deleted=False)

    if not context.is_admin:
        # Restrict visibility: caller must own the image or be the member.
        query = query.join(models.Image)
        visibility = or_(models.Image.owner == context.owner,
                         models.ImageMember.member == context.owner)
        query = query.filter(visibility)

    # Optional exact-match criteria; None means "don't filter on this".
    exact_matches = ((models.ImageMember.image_id, image_id),
                     (models.ImageMember.member, member),
                     (models.ImageMember.status, status))
    for column, wanted in exact_matches:
        if wanted is not None:
            query = query.filter(column == wanted)

    return query.all()
Example #4
0
def flavor_get_all(context,
                   inactive=False,
                   filters=None,
                   sort_key='flavorid',
                   sort_dir='asc',
                   limit=None,
                   marker=None):
    """Returns all flavors.
    """
    filters = filters or {}

    # FIXME(sirp): now that we have the `disabled` field for flavors, we
    # should probably remove the use of `deleted` to mark inactive. `deleted`
    # should mean truly deleted, e.g. we can safely purge the record out of the
    # database.
    read_deleted = "yes" if inactive else "no"

    query = _flavor_get_query(context, read_deleted=read_deleted)

    # Lower-bound filters on numeric columns.
    range_filters = (('min_memory_mb', models.InstanceTypes.memory_mb),
                     ('min_root_gb', models.InstanceTypes.root_gb))
    for key, column in range_filters:
        if key in filters:
            query = query.filter(column >= filters[key])

    if 'disabled' in filters:
        query = query.filter(
            models.InstanceTypes.disabled == filters['disabled'])

    if 'is_public' in filters and filters['is_public'] is not None:
        # Kept as a list so a project-visibility clause could be appended
        # again later (it was disabled in this port).
        public_filters = [
            models.InstanceTypes.is_public == filters['is_public']]
        if len(public_filters) > 1:
            query = query.filter(or_(*public_filters))
        else:
            query = query.filter(public_filters[0])

    # Validate the pagination marker even though pagination itself is
    # currently not applied below.
    marker_row = None
    if marker is not None:
        marker_row = (_flavor_get_query(context, read_deleted=read_deleted)
                      .filter_by(flavorid=marker)
                      .first())
        if not marker_row:
            raise Exception("MarkerNotFound(%s)" % (marker))

    # NOTE: sorting/pagination is not applied in this port — the filtered
    # query above is replaced by a plain RomeQuery before fetching.
    query = RomeQuery(models.InstanceTypes)
    inst_types = query.all()

    return [_dict_with_extra_specs(i) for i in inst_types]
def flavor_get_all(context, inactive=False, filters=None,
                   sort_key='flavorid', sort_dir='asc', limit=None,
                   marker=None):
    """Returns all flavors.
    """
    # NOTE(review): this is a duplicate definition — it shadows an earlier
    # ``flavor_get_all`` in this module.
    filters = filters or {}

    # FIXME(sirp): now that we have the `disabled` field for flavors, we
    # should probably remove the use of `deleted` to mark inactive. `deleted`
    # should mean truly deleted, e.g. we can safely purge the record out of the
    # database.
    read_deleted = "yes" if inactive else "no"

    query = _flavor_get_query(context, read_deleted=read_deleted)

    if 'min_memory_mb' in filters:
        minimum_memory = filters['min_memory_mb']
        query = query.filter(
            models.InstanceTypes.memory_mb >= minimum_memory)

    if 'min_root_gb' in filters:
        minimum_root = filters['min_root_gb']
        query = query.filter(models.InstanceTypes.root_gb >= minimum_root)

    if 'disabled' in filters:
        query = query.filter(
            models.InstanceTypes.disabled == filters['disabled'])

    if 'is_public' in filters and filters['is_public'] is not None:
        # Kept as a list so a project-visibility clause could be appended
        # again later (it was disabled in this port).
        visibility = [models.InstanceTypes.is_public == filters['is_public']]
        query = (query.filter(or_(*visibility)) if len(visibility) > 1
                 else query.filter(visibility[0]))

    # Validate the pagination marker even though pagination itself is not
    # applied below.
    marker_row = None
    if marker is not None:
        marker_row = (_flavor_get_query(context, read_deleted=read_deleted)
                      .filter_by(flavorid=marker)
                      .first())
        if not marker_row:
            raise Exception("MarkerNotFound(%s)" % (marker))

    # NOTE: sorting/pagination is not applied in this port — the filtered
    # query above is replaced by a plain RomeQuery before fetching.
    query = RomeQuery(models.InstanceTypes)
    inst_types = query.all()

    return [_dict_with_extra_specs(i) for i in inst_types]
def _flavor_get_query(context, session=None, read_deleted=None):
    """Build the base flavor query, hiding private flavors from non-admins."""
    query = model_query(context, models.InstanceTypes, session=session,
                        read_deleted=read_deleted)
    if context.is_admin:
        return query
    # Non-admin: public flavors, plus private ones granted to the project.
    visible = or_(
        models.InstanceTypes.is_public == True,
        models.InstanceTypes.projects.any(project_id=context.project_id))
    return query.filter(visible)
def migration_get_in_progress_by_host_and_node(context, host, node):
    """List migrations involving (host, node) that are still in flight."""
    # The host/node pair may appear on either side of the migration.
    involves_node = or_(
        and_(models.Migration.source_compute == host,
             models.Migration.source_node == node),
        and_(models.Migration.dest_compute == host,
             models.Migration.dest_node == node))
    # Terminal states are excluded; everything else counts as in progress.
    finished = models.Migration.status.in_(['confirmed', 'reverted',
                                            'error'])
    query = model_query(context, models.Migration)
    return query.filter(involves_node).filter(~finished).all()
Example #8
0
def _flavor_get_query(context, session=None, read_deleted=None):
    """Base query over flavors; non-admins only see flavors visible to them."""
    query = model_query(context,
                        models.InstanceTypes,
                        session=session,
                        read_deleted=read_deleted)
    if not context.is_admin:
        # A flavor is visible when it is public, or when it has been
        # shared with the caller's project.
        query = query.filter(or_(
            models.InstanceTypes.is_public == True,
            models.InstanceTypes.projects.any(
                project_id=context.project_id)))
    return query
Example #9
0
def fixed_ip_associate_pool(context,
                            network_id,
                            instance_uuid=None,
                            host=None):
    """Associate a free fixed IP from the pool with an instance and/or host.

    Picks a random unreserved, unassigned fixed IP belonging to
    *network_id* (or not yet assigned to any network), stamps it with
    *instance_uuid* and/or *host* when given, and returns the row.

    :raises Exception: if *instance_uuid* is not UUID-like, or when no
        free fixed IP is left in the network.
    """
    if instance_uuid and not uuidutils.is_uuid_like(instance_uuid):
        raise Exception()
    session = get_session()
    # lockname = "lock-fixed_ip_associate_pool"
    # acquire_lock(lockname)
    fixed_ip_ref = None
    fixed_ip_ref_no_more = False
    # NOTE(review): debug trace to a hard-coded log path; kept as-is, but the
    # handle is now closed on every path (the original leaked it on error).
    fo = open("/opt/logs/db_api.log", "a")
    try:
        fo.write("[NET] api.fixed_ip_associate_pool() (1-a): network_id: %s\n" %
                 (str(network_id)))
        with session.begin():
            network_or_none = or_(models.FixedIp.network_id == network_id,
                                  models.FixedIp.network_id == None)
            fixed_ips = model_query(context, models.FixedIp, session=session,
                                       read_deleted="no").\
                                   filter(network_or_none).\
                                   filter_by(reserved=False).\
                                   filter_by(instance_uuid=None).\
                                   filter_by(host=None).\
                                   with_lockmode('update').\
                                   all()
            # BUG FIX: random.choice() raises IndexError on an empty list, so
            # the "no more addresses" branch below was unreachable.
            fixed_ip_ref = random.choice(fixed_ips) if fixed_ips else None
            # NOTE(vish): if with_lockmode isn't supported, as in sqlite,
            #             then this has concurrency issues
            if not fixed_ip_ref:
                fixed_ip_ref_no_more = True
            else:
                acquire_lock("lock-fixed_ip_%s" % (fixed_ip_ref.address))
                if fixed_ip_ref['network_id'] is None:
                    # BUG FIX: the original assigned the 'network' key, which
                    # left 'network_id' (the column checked above) unset.
                    fixed_ip_ref['network_id'] = network_id

                if instance_uuid:
                    fixed_ip_ref['instance_uuid'] = instance_uuid

                if host:
                    fixed_ip_ref['host'] = host
                session.add(fixed_ip_ref)
        # give 100ms to the session to commit changes; then the lock is
        # released.
        # time.sleep(0.1)
        # release_lock(lockname)
        if fixed_ip_ref_no_more:
            # BUG FIX: Exception() accepts no keyword arguments, so the
            # original ``raise Exception(net=network_id)`` was a TypeError.
            raise Exception("No more fixed ips in network: %s" % (network_id,))
        fo.write("[NET] api.fixed_ip_associate_pool() (1-c): return: %s\n" %
                 (fixed_ip_ref))
    finally:
        fo.close()
    return fixed_ip_ref
def fixed_ip_associate_pool(context, network_id, instance_uuid=None,
                            host=None):
    """Associate a free fixed IP from the pool with an instance and/or host.

    Picks a random unreserved, unassigned fixed IP belonging to
    *network_id* (or not yet assigned to any network), stamps it with
    *instance_uuid* and/or *host* when given, and returns the row.

    :raises Exception: if *instance_uuid* is not UUID-like, or when no
        free fixed IP is left in the network.
    """
    if instance_uuid and not uuidutils.is_uuid_like(instance_uuid):
        raise Exception()
    session = get_session()
    # lockname = "lock-fixed_ip_associate_pool"
    # acquire_lock(lockname)
    fixed_ip_ref = None
    fixed_ip_ref_no_more = False
    # NOTE(review): debug trace to a hard-coded log path; kept as-is, but the
    # handle is now closed on every path (the original leaked it on error).
    fo = open("/opt/logs/db_api.log", "a")
    try:
        fo.write("[NET] api.fixed_ip_associate_pool() (1-a): network_id: %s\n" % (str(network_id)))
        with session.begin():
            network_or_none = or_(models.FixedIp.network_id == network_id,
                                  models.FixedIp.network_id == None)
            fixed_ips = model_query(context, models.FixedIp, session=session,
                                       read_deleted="no").\
                                   filter(network_or_none).\
                                   filter_by(reserved=False).\
                                   filter_by(instance_uuid=None).\
                                   filter_by(host=None).\
                                   with_lockmode('update').\
                                   all()
            # BUG FIX: random.choice() raises IndexError on an empty list, so
            # the "no more addresses" branch below was unreachable.
            fixed_ip_ref = random.choice(fixed_ips) if fixed_ips else None
            # NOTE(vish): if with_lockmode isn't supported, as in sqlite,
            #             then this has concurrency issues
            if not fixed_ip_ref:
                fixed_ip_ref_no_more = True
            else:
                acquire_lock("lock-fixed_ip_%s" % (fixed_ip_ref.address))
                if fixed_ip_ref['network_id'] is None:
                    # BUG FIX: the original assigned the 'network' key, which
                    # left 'network_id' (the column checked above) unset.
                    fixed_ip_ref['network_id'] = network_id

                if instance_uuid:
                    fixed_ip_ref['instance_uuid'] = instance_uuid

                if host:
                    fixed_ip_ref['host'] = host
                session.add(fixed_ip_ref)
        # give 100ms to the session to commit changes; then the lock is
        # released.
        # time.sleep(0.1)
        # release_lock(lockname)
        if fixed_ip_ref_no_more:
            # BUG FIX: Exception() accepts no keyword arguments, so the
            # original ``raise Exception(net=network_id)`` was a TypeError.
            raise Exception("No more fixed ips in network: %s" % (network_id,))
        fo.write("[NET] api.fixed_ip_associate_pool() (1-c): return: %s\n" % (fixed_ip_ref))
    finally:
        fo.close()
    return fixed_ip_ref
def instance_get_active_by_window_joined(context, begin, end=None,
                                         project_id=None, host=None,
                                         use_slave=False):
    """Return instances and joins that were active during window."""
    session = get_session(use_slave=use_slave)

    # An instance counts as active in the window when it was not
    # terminated before the window opened.
    still_active = or_(models.Instance.terminated_at == null(),
                       models.Instance.terminated_at > begin)
    query = (session.query(models.Instance)
             .options(joinedload('info_cache'))
             .options(joinedload('security_groups'))
             .filter(still_active))
    if end:
        # ...and it must have been launched before the window closed.
        query = query.filter(models.Instance.launched_at < end)
    if project_id:
        query = query.filter_by(project_id=project_id)
    if host:
        query = query.filter_by(host=host)

    return _instances_fill_metadata(context, query.all())
def _quota_usage_get_all(context, project_id, user_id=None):
    """Aggregate per-resource quota usage for a project (optionally a user).

    Returns a dict mapping resource name to ``{'in_use': .., 'reserved': ..}``
    plus the ``project_id`` (and ``user_id`` when given) keys.
    """
    query = model_query(context, models.QuotaUsage, read_deleted="no").\
                   filter_by(project_id=project_id)
    result = {'project_id': project_id}
    if user_id:
        # Include the caller's per-user rows plus project-wide rows
        # (those with a NULL user_id).
        query = query.filter(or_(models.QuotaUsage.user_id == user_id,
                                 models.QuotaUsage.user_id == null()))
        result['user_id'] = user_id

    # Sum usage across rows sharing the same resource name.
    for row in query.all():
        if row.resource in result:
            usage = result[row.resource]
            usage['in_use'] += row.in_use
            usage['reserved'] += row.reserved
        else:
            result[row.resource] = dict(in_use=row.in_use,
                                        reserved=row.reserved)

    return result
Example #13
0
def _quota_usage_get_all(context, project_id, user_id=None):
    """Aggregate per-resource quota usage for a project (optionally a user)."""
    query = model_query(context, models.QuotaUsage, read_deleted="no").\
                   filter_by(project_id=project_id)
    result = {'project_id': project_id}
    if user_id:
        # Per-user rows and project-wide rows (NULL user_id) both apply.
        per_user_or_global = or_(models.QuotaUsage.user_id == user_id,
                                 models.QuotaUsage.user_id == null())
        query = query.filter(per_user_or_global)
        result['user_id'] = user_id

    rows = query.all()
    for row in rows:
        # First row for a resource creates the entry; later rows add to it.
        if row.resource not in result:
            result[row.resource] = dict(in_use=row.in_use,
                                        reserved=row.reserved)
        else:
            result[row.resource]['in_use'] += row.in_use
            result[row.resource]['reserved'] += row.reserved

    return result
def instance_get_active_by_window_joined(context,
                                         begin,
                                         end=None,
                                         project_id=None,
                                         host=None,
                                         use_slave=False):
    """Return instances and joins that were active during window."""
    session = get_session(use_slave=use_slave)
    query = session.query(models.Instance)

    query = query.options(joinedload('info_cache'))
    query = query.options(joinedload('security_groups'))
    # Active means: not terminated before the window started.
    query = query.filter(or_(models.Instance.terminated_at == null(),
                             models.Instance.terminated_at > begin))
    if end:
        # ...and launched before the window ended.
        query = query.filter(models.Instance.launched_at < end)
    if project_id:
        query = query.filter_by(project_id=project_id)
    if host:
        query = query.filter_by(host=host)

    return _instances_fill_metadata(context, query.all())
Example #15
0
def _image_member_find(context, session, image_id=None,
                       member=None, status=None, include_deleted=False):
    """Find image-membership rows matching the given criteria.

    Non-admins only see memberships on images they own or rows on which
    they are the member.
    """
    query = session.query(models.ImageMember)
    if not include_deleted:
        query = query.filter_by(deleted=False)

    if not context.is_admin:
        # Visibility restriction for regular users.
        query = query.join(models.Image)
        owner_or_member = or_(models.Image.owner == context.owner,
                              models.ImageMember.member == context.owner)
        query = query.filter(owner_or_member)

    # Exact-match criteria, applied only when supplied.
    if image_id is not None:
        query = query.filter(models.ImageMember.image_id == image_id)
    if member is not None:
        query = query.filter(models.ImageMember.member == member)
    if status is not None:
        query = query.filter(models.ImageMember.status == status)

    return query.all()
Example #16
0
def instance_get_all_by_filters(context,
                                filters,
                                sort_key,
                                sort_dir,
                                limit=None,
                                marker=None,
                                columns_to_join=None,
                                use_slave=False):
    """Return instances that match all filters.  Deleted instances
    will be returned by default, unless there's a filter that says
    otherwise.
    Depending on the name of a filter, matching for that filter is
    performed using either exact matching or as regular expression
    matching. Exact matching is applied for the following filters::
    |   ['project_id', 'user_id', 'image_ref',
    |    'vm_state', 'instance_type_id', 'uuid',
    |    'metadata', 'host', 'system_metadata']
    A third type of filter (also using exact matching), filters
    based on instance metadata tags when supplied under a special
    key named 'filter'::
    |   filters = {
    |       'filter': [
    |           {'name': 'tag-key', 'value': '<metakey>'},
    |           {'name': 'tag-value', 'value': '<metaval>'},
    |           {'name': 'tag:<metakey>', 'value': '<metaval>'}
    |       ]
    |   }
    Special keys are used to tweek the query further::
    |   'changes-since' - only return instances updated after
    |   'deleted' - only return (or exclude) deleted instances
    |   'soft_deleted' - modify behavior of 'deleted' to either
    |                    include or exclude instances whose
    |                    vm_state is SOFT_DELETED.
    """
    # NOTE(mriedem): If the limit is 0 there is no point in even going
    # to the database since nothing is going to be returned anyway.
    if limit == 0:
        return []

    sort_fn = {'desc': desc, 'asc': asc}

    # if CONF.database.slave_connection == '':
    #     use_slave = False

    session = get_session(use_slave=use_slave)

    if columns_to_join is None:
        columns_to_join = ['info_cache', 'security_groups']
        manual_joins = ['metadata', 'system_metadata']
    else:
        manual_joins, columns_to_join = _manual_join_columns(columns_to_join)

    query_prefix = session.query(models.Instance)
    for column in columns_to_join:
        query_prefix = query_prefix.options(joinedload(column))

    query_prefix = query_prefix.order_by(sort_fn[sort_dir](getattr(
        models.Instance, sort_key)))

    # Make a copy of the filters dictionary to use going forward, as we'll
    # be modifying it and we shouldn't affect the caller's use of it.
    filters = filters.copy()

    # BUG FIX: the original reset query_prefix here with a fresh
    # ``session.query(models.Instance)``, silently discarding the
    # joinedload options and the order_by built above.
    if 'changes-since' in filters:
        # BUG FIX: the original popped 'changes_since' (underscore), which
        # raised KeyError since the key actually present is 'changes-since'.
        changes_since = timeutils.normalize_time(filters.pop('changes-since'))
        query_prefix = query_prefix.\
                            filter(models.Instance.updated_at >= changes_since)

    if 'deleted' in filters:
        # Instances can be soft or hard deleted and the query needs to
        # include or exclude both
        if filters.pop('deleted'):
            if filters.pop('soft_deleted', True):
                # CONSISTENCY FIX: match the sibling implementation — also
                # include soft-deleted instances via vm_states.SOFT_DELETED
                # (the original compared against the literal "soft-deleting"
                # below, which is a task-state spelling, and built a
                # one-armed or_()).
                deleted = or_(
                    models.Instance.deleted == models.Instance.id,
                    models.Instance.vm_state == vm_states.SOFT_DELETED
                    )
                query_prefix = query_prefix.\
                    filter(deleted)
            else:
                query_prefix = query_prefix.\
                    filter(models.Instance.deleted == models.Instance.id)
        else:
            query_prefix = query_prefix.\
                    filter_by(deleted=0)
            if not filters.pop('soft_deleted', False):
                # It would be better to have vm_state not be nullable
                # but until then we test it explicitly as a workaround.
                not_soft_deleted = or_(
                    models.Instance.vm_state != vm_states.SOFT_DELETED,
                    models.Instance.vm_state == null()
                    )
                query_prefix = query_prefix.filter(not_soft_deleted)

    # if 'cleaned' in filters:
    #     if filters.pop('cleaned'):
    #         query_prefix = query_prefix.filter(models.Instance.cleaned == 1)
    #     else:
    #         query_prefix = query_prefix.filter(models.Instance.cleaned == 0)

    # if not context.is_admin:
    #     # If we're not admin context, add appropriate filter..
    #     if context.project_id:
    #         filters['project_id'] = context.project_id
    #     else:
    #         filters['user_id'] = context.user_id

    # # Filters for exact matches that we can do along with the SQL query...
    # # For other filters that don't match this, we will do regexp matching
    # exact_match_filter_names = ['project_id', 'user_id', 'image_ref',
    #                             'vm_state', 'instance_type_id', 'uuid',
    #                             'metadata', 'host', 'task_state',
    #                             'system_metadata']

    # # Filter the query
    # query_prefix = exact_filter(query_prefix, models.Instance,
    #                             filters, exact_match_filter_names)

    # query_prefix = regex_filter(query_prefix, models.Instance, filters)
    # query_prefix = tag_filter(context, query_prefix, models.Instance,
    #                           models.InstanceMetadata,
    #                           models.InstanceMetadata.instance_uuid,
    #                           filters)

    # paginate query
    # if marker is not None:
    #     try:
    #         marker = _instance_get_by_uuid(context, marker, session=session)
    #     except exception.InstanceNotFound:
    #         raise exception.MarkerNotFound(marker)
    # TODO: following cannot yet work with the RIAK DB implementation!
    # query_prefix = sqlalchemyutils.paginate_query(query_prefix,
    #                        models.Instance, limit,
    #                        [sort_key, 'created_at', 'id'],
    #                        marker=marker,
    #                        sort_dir=sort_dir)
    return query_prefix.all()
def instance_get_all_by_filters(context, filters, sort_key, sort_dir,
                                limit=None, marker=None, columns_to_join=None,
                                use_slave=False):
    """Return instances that match all filters.  Deleted instances
    will be returned by default, unless there's a filter that says
    otherwise.

    Depending on the name of a filter, matching for that filter is
    performed using either exact matching or as regular expression
    matching. Exact matching is applied for the following filters::

    |   ['project_id', 'user_id', 'image_ref',
    |    'vm_state', 'instance_type_id', 'uuid',
    |    'metadata', 'host', 'system_metadata']


    A third type of filter (also using exact matching), filters
    based on instance metadata tags when supplied under a special
    key named 'filter'::

    |   filters = {
    |       'filter': [
    |           {'name': 'tag-key', 'value': '<metakey>'},
    |           {'name': 'tag-value', 'value': '<metaval>'},
    |           {'name': 'tag:<metakey>', 'value': '<metaval>'}
    |       ]
    |   }

    Special keys are used to tweek the query further::

    |   'changes-since' - only return instances updated after
    |   'deleted' - only return (or exclude) deleted instances
    |   'soft_deleted' - modify behavior of 'deleted' to either
    |                    include or exclude instances whose
    |                    vm_state is SOFT_DELETED.

    """
    # NOTE(mriedem): If the limit is 0 there is no point in even going
    # to the database since nothing is going to be returned anyway.
    if limit == 0:
        return []

    sort_fn = {'desc': desc, 'asc': asc}

    # if CONF.database.slave_connection == '':
    #     use_slave = False

    session = get_session(use_slave=use_slave)

    if columns_to_join is None:
        columns_to_join = ['info_cache', 'security_groups']
        manual_joins = ['metadata', 'system_metadata']
    else:
        manual_joins, columns_to_join = _manual_join_columns(columns_to_join)

    query_prefix = session.query(models.Instance)
    # for column in columns_to_join:
    #     query_prefix = query_prefix.options(joinedload(column))

    query_prefix = query_prefix.order_by(sort_fn[sort_dir](
            getattr(models.Instance, sort_key)))

    # Make a copy of the filters dictionary to use going forward, as we'll
    # be modifying it and we shouldn't affect the caller's use of it.
    filters = filters.copy()

    # BUG FIX: the original reset query_prefix here with a fresh
    # ``session.query(models.Instance)``, silently discarding the
    # order_by applied above.
    if 'changes-since' in filters:
        # BUG FIX: the original popped 'changes_since' (underscore), which
        # raised KeyError since the key actually present is 'changes-since'.
        changes_since = timeutils.normalize_time(filters.pop('changes-since'))
        query_prefix = query_prefix.\
                            filter(models.Instance.updated_at >= changes_since)

    if 'deleted' in filters:
        # Instances can be soft or hard deleted and the query needs to
        # include or exclude both
        if filters.pop('deleted'):
            if filters.pop('soft_deleted', True):
                deleted = or_(
                    models.Instance.deleted == models.Instance.id,
                    models.Instance.vm_state == vm_states.SOFT_DELETED
                    )
                query_prefix = query_prefix.\
                    filter(deleted)
            else:
                query_prefix = query_prefix.\
                    filter(models.Instance.deleted == models.Instance.id)
        else:
            query_prefix = query_prefix.\
                    filter_by(deleted=0)
            if not filters.pop('soft_deleted', False):
                # It would be better to have vm_state not be nullable
                # but until then we test it explicitly as a workaround.
                not_soft_deleted = or_(
                    models.Instance.vm_state != vm_states.SOFT_DELETED,
                    models.Instance.vm_state == null()
                    )
                query_prefix = query_prefix.filter(not_soft_deleted)

    # if 'cleaned' in filters:
    #     if filters.pop('cleaned'):
    #         query_prefix = query_prefix.filter(models.Instance.cleaned == 1)
    #     else:
    #         query_prefix = query_prefix.filter(models.Instance.cleaned == 0)

    # if not context.is_admin:
    #     # If we're not admin context, add appropriate filter..
    #     if context.project_id:
    #         filters['project_id'] = context.project_id
    #     else:
    #         filters['user_id'] = context.user_id

    # # Filters for exact matches that we can do along with the SQL query...
    # # For other filters that don't match this, we will do regexp matching
    # exact_match_filter_names = ['project_id', 'user_id', 'image_ref',
    #                             'vm_state', 'instance_type_id', 'uuid',
    #                             'metadata', 'host', 'task_state',
    #                             'system_metadata']

    # # Filter the query
    # query_prefix = exact_filter(query_prefix, models.Instance,
    #                             filters, exact_match_filter_names)

    # query_prefix = regex_filter(query_prefix, models.Instance, filters)
    # query_prefix = tag_filter(context, query_prefix, models.Instance,
    #                           models.InstanceMetadata,
    #                           models.InstanceMetadata.instance_uuid,
    #                           filters)

    # paginate query
    # if marker is not None:
    #     try:
    #         marker = _instance_get_by_uuid(context, marker, session=session)
    #     except exception.InstanceNotFound:
    #         raise exception.MarkerNotFound(marker)
    # TODO: following cannot yet work with the RIAK DB implementation!
    # query_prefix = sqlalchemyutils.paginate_query(query_prefix,
    #                        models.Instance, limit,
    #                        [sort_key, 'created_at', 'id'],
    #                        marker=marker,
    #                        sort_dir=sort_dir)
    return query_prefix.all()
Example #18
0
def _paginate_query(query,
                    model,
                    limit,
                    sort_keys,
                    marker=None,
                    sort_dir=None,
                    sort_dirs=None):
    """Returns a query with sorting / pagination criteria added.

    Pagination works by requiring a unique sort_key, specified by sort_keys.
    (If sort_keys is not unique, then we risk looping through values.)
    We use the last row in the previous page as the 'marker' for pagination.
    So we must return values that follow the passed marker in the order.
    With a single-valued sort_key, this would be easy: sort_key > X.
    With a compound-values sort_key, (k1, k2, k3) we must do this to repeat
    the lexicographical ordering:
    (k1 > X1) or (k1 == X1 && k2 > X2) or (k1 == X1 && k2 == X2 && k3 > X3)
    We also have to cope with different sort_directions.
    Typically, the id of the last row is used as the client-facing pagination
    marker, then the actual marker object must be fetched from the db and
    passed in to us as marker.

    :param query: the query object to which we should add paging/sorting
    :param model: the ORM model class
    :param limit: maximum number of items to return
    :param sort_keys: array of attributes by which results should be sorted
    :param marker: the last item of the previous page; we return the next
                   results after this value.
    :param sort_dir: direction in which results should be sorted (asc, desc)
    :param sort_dirs: per-column array of sort_dirs, corresponding to
                      sort_keys
    :rtype: sqlalchemy.orm.query.Query
    :return: The query with sorting/pagination added.
    :raises ValueError: if both sort_dir and sort_dirs are supplied, if
                        sort_dirs and sort_keys differ in length, or if a
                        sort direction is not 'asc'/'desc'.
    """

    if 'id' not in sort_keys:
        # TODO(justinsb): If this ever gives a false-positive, check
        # the actual primary key, rather than assuming its id
        # NOTE: warning() instead of the deprecated warn() alias.
        LOG.warning('Id not in sort_keys; is sort_keys unique?')

    # NOTE: explicit validation instead of assert -- assert statements are
    # stripped when Python runs with -O, which would silently skip these
    # checks.
    if sort_dir and sort_dirs:
        raise ValueError("Only one of sort_dir or sort_dirs may be supplied")

    # Default the sort direction to ascending
    if sort_dirs is None and sort_dir is None:
        sort_dir = 'asc'

    # Ensure a per-column sort direction
    if sort_dirs is None:
        sort_dirs = [sort_dir for _sort_key in sort_keys]

    if len(sort_dirs) != len(sort_keys):
        raise ValueError("sort_dirs and sort_keys must have the same length")

    # Add sorting
    for current_sort_key, current_sort_dir in zip(sort_keys, sort_dirs):
        sort_dir_func = {
            'asc': sqlalchemy.asc,
            'desc': sqlalchemy.desc,
        }[current_sort_dir]

        try:
            sort_key_attr = getattr(model, current_sort_key)
        except AttributeError:
            # NOTE(review): this looks like a mangled
            # "raise exception.InvalidSortKey()"; kept as a plain Exception
            # so the type existing callers may catch is unchanged -- confirm
            # against the upstream project before tightening.
            raise Exception("exception.InvalidSortKey()")
        query = query.order_by(sort_dir_func(sort_key_attr))

    default = ''  # Default to an empty string if NULL

    # Add pagination
    if marker is not None:
        # Marker-row values per sort key; NULLs are normalized to the
        # empty-string default so the comparisons below are well-defined.
        marker_values = []
        for sort_key in sort_keys:
            v = getattr(marker, sort_key)
            if v is None:
                v = default
            marker_values.append(v)

        # Build up an array of sort criteria as in the docstring
        criteria_list = []
        for i in range(len(sort_keys)):
            # Equality constraints on every key preceding key i ...
            crit_attrs = [
                (_null_safe_column(model, sort_keys[j]) == marker_values[j])
                for j in range(i)
            ]

            # ... plus a strict inequality on key i itself, oriented by its
            # sort direction.
            attr = _null_safe_column(model, sort_keys[i])
            if sort_dirs[i] == 'desc':
                crit_attrs.append((attr < marker_values[i]))
            elif sort_dirs[i] == 'asc':
                crit_attrs.append((attr > marker_values[i]))
            else:
                raise ValueError("Unknown sort direction, "
                                 "must be 'desc' or 'asc'")

            criteria_list.append(and_(*crit_attrs))

        query = query.filter(or_(*criteria_list))

    if limit is not None:
        query = query.limit(limit)

    return query


def _null_safe_column(model, sort_key):
    """Return column *sort_key* of *model* wrapped in a CASE expression that
    substitutes a type-appropriate default (None for DateTime columns, ''
    otherwise) when the column is NULL, so marker comparisons never test
    against NULL directly.
    """
    model_attr = getattr(model, sort_key)
    default = None if isinstance(model_attr.property.columns[0].type,
                                 sqlalchemy.DateTime) else ''
    # "!= None" (rather than "is not None") is deliberate: SQLAlchemy
    # overloads the operator to emit "IS NOT NULL".
    return sa_sql.expression.case([(model_attr != None, model_attr)],
                                  else_=default)
Beispiel #19
0
def _paginate_query(query, model, limit, sort_keys, marker=None,
                    sort_dir=None, sort_dirs=None):
    """Returns a query with sorting / pagination criteria added.

    Pagination works by requiring a unique sort_key, specified by sort_keys.
    (If sort_keys is not unique, then we risk looping through values.)
    We use the last row in the previous page as the 'marker' for pagination.
    So we must return values that follow the passed marker in the order.
    With a single-valued sort_key, this would be easy: sort_key > X.
    With a compound-values sort_key, (k1, k2, k3) we must do this to repeat
    the lexicographical ordering:
    (k1 > X1) or (k1 == X1 && k2 > X2) or (k1 == X1 && k2 == X2 && k3 > X3)
    We also have to cope with different sort_directions.
    Typically, the id of the last row is used as the client-facing pagination
    marker, then the actual marker object must be fetched from the db and
    passed in to us as marker.

    :param query: the query object to which we should add paging/sorting
    :param model: the ORM model class
    :param limit: maximum number of items to return
    :param sort_keys: array of attributes by which results should be sorted
    :param marker: the last item of the previous page; we return the next
                   results after this value.
    :param sort_dir: direction in which results should be sorted (asc, desc)
    :param sort_dirs: per-column array of sort_dirs, corresponding to
                      sort_keys
    :rtype: sqlalchemy.orm.query.Query
    :return: The query with sorting/pagination added.
    :raises ValueError: if both sort_dir and sort_dirs are supplied, if
                        sort_dirs and sort_keys differ in length, or if a
                        sort direction is not 'asc'/'desc'.
    """

    if 'id' not in sort_keys:
        # TODO(justinsb): If this ever gives a false-positive, check
        # the actual primary key, rather than assuming its id
        # NOTE: warning() instead of the deprecated warn() alias.
        LOG.warning('Id not in sort_keys; is sort_keys unique?')

    # NOTE: explicit validation instead of assert -- assert statements are
    # stripped when Python runs with -O, which would silently skip these
    # checks.
    if sort_dir and sort_dirs:
        raise ValueError("Only one of sort_dir or sort_dirs may be supplied")

    # Default the sort direction to ascending
    if sort_dirs is None and sort_dir is None:
        sort_dir = 'asc'

    # Ensure a per-column sort direction
    if sort_dirs is None:
        sort_dirs = [sort_dir for _sort_key in sort_keys]

    if len(sort_dirs) != len(sort_keys):
        raise ValueError("sort_dirs and sort_keys must have the same length")

    # Add sorting
    for current_sort_key, current_sort_dir in zip(sort_keys, sort_dirs):
        sort_dir_func = {
            'asc': sqlalchemy.asc,
            'desc': sqlalchemy.desc,
        }[current_sort_dir]

        try:
            sort_key_attr = getattr(model, current_sort_key)
        except AttributeError:
            # NOTE(review): this looks like a mangled
            # "raise exception.InvalidSortKey()"; kept as a plain Exception
            # so the type existing callers may catch is unchanged -- confirm
            # against the upstream project before tightening.
            raise Exception("exception.InvalidSortKey()")
        query = query.order_by(sort_dir_func(sort_key_attr))

    default = ''  # Default to an empty string if NULL

    # Add pagination
    if marker is not None:
        # Marker-row values per sort key; NULLs are normalized to the
        # empty-string default so the comparisons below are well-defined.
        marker_values = []
        for sort_key in sort_keys:
            v = getattr(marker, sort_key)
            if v is None:
                v = default
            marker_values.append(v)

        # Build up an array of sort criteria as in the docstring
        criteria_list = []
        for i in range(len(sort_keys)):
            crit_attrs = []
            # Equality constraints on every key preceding key i. Each column
            # is wrapped in a CASE that substitutes a type-appropriate
            # default (None for DateTime columns, '' otherwise) when the
            # column is NULL, so we never compare against NULL directly.
            # "!= None" (not "is not None") is deliberate: SQLAlchemy
            # overloads the operator to emit "IS NOT NULL".
            for j in range(i):
                model_attr = getattr(model, sort_keys[j])
                default = None if isinstance(
                    model_attr.property.columns[0].type,
                    sqlalchemy.DateTime) else ''
                attr = sa_sql.expression.case([(model_attr != None,
                                              model_attr), ],
                                              else_=default)
                crit_attrs.append((attr == marker_values[j]))

            # Strict inequality on key i itself, oriented by its direction.
            model_attr = getattr(model, sort_keys[i])
            default = None if isinstance(model_attr.property.columns[0].type,
                                         sqlalchemy.DateTime) else ''
            attr = sa_sql.expression.case([(model_attr != None,
                                          model_attr), ],
                                          else_=default)
            if sort_dirs[i] == 'desc':
                crit_attrs.append((attr < marker_values[i]))
            elif sort_dirs[i] == 'asc':
                crit_attrs.append((attr > marker_values[i]))
            else:
                raise ValueError("Unknown sort direction, "
                                 "must be 'desc' or 'asc'")

            criteria = and_(*crit_attrs)
            criteria_list.append(criteria)

        f = or_(*criteria_list)
        query = query.filter(f)

    if limit is not None:
        query = query.limit(limit)

    return query