Example #1
    def _find(self, context, table, cls, list_cls, exc_notfound, criterion,
              one=False, marker=None, limit=None, sort_key=None,
              sort_dir=None, query=None, apply_tenant_criteria=True):

        sort_key = sort_key or 'created_at'
        sort_dir = sort_dir or 'asc'

        # Build the query
        if query is None:
            query = select([table])
        query = self._apply_criterion(table, query, criterion)
        if apply_tenant_criteria:
            query = self._apply_tenant_criteria(context, table, query)

        query = self._apply_deleted_criteria(context, table, query)

        # Execute the Query
        if one:
            # NOTE(kiall): If we expect one value, and two rows match, we raise
            #              a NotFound. Limiting to 2 allows us to determine
            #              when we need to raise, while selecting the minimal
            #              number of rows.
            resultproxy = self.session.execute(query.limit(2))
            results = resultproxy.fetchall()

            if len(results) != 1:
                msg = "Could not find %s" % cls.obj_name()
                raise exc_notfound(msg)
            else:
                return _set_object_from_model(cls(), results[0])
        else:
            if marker is not None:
                marker = utils.check_marker(table, marker, self.session)

            try:
                query = utils.paginate_query(
                    query, table, limit,
                    [sort_key, 'id'], marker=marker,
                    sort_dir=sort_dir)

                resultproxy = self.session.execute(query)
                results = resultproxy.fetchall()

                return _set_listobject_from_models(list_cls(), results)
            except oslodb_utils.InvalidSortKey as sort_key_error:
                raise exceptions.InvalidSortKey(six.text_type(sort_key_error))
            # Any ValueErrors are propagated back to the user as is.
            # Limits, sort_dir and sort_key are checked at the API layer.
            # If however central or storage is called directly, invalid values
            # show up as ValueError
            except ValueError as value_error:
                raise exceptions.ValueError(six.text_type(value_error))
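
The one=True branch above fetches at most two rows (query.limit(2)) so it can
tell "exactly one match" apart from "none or several" without reading the full
result set, and raises the caller-supplied exc_notfound otherwise. A minimal,
self-contained sketch of just that check over plain Python sequences, where
NotFound and get_exactly_one are illustrative stand-ins rather than names from
the method above:

    class NotFound(Exception):
        """Stand-in for the storage-specific exc_notfound classes."""

    def get_exactly_one(rows):
        # Emulate query.limit(2): never examine more than two rows.
        limited = list(rows)[:2]
        if len(limited) != 1:
            # Zero matches and multiple matches both surface as NotFound,
            # mirroring the len(results) != 1 check above.
            raise NotFound("expected exactly one row, got %d" % len(limited))
        return limited[0]

    # A single match is returned; zero or many matches raise NotFound.
    assert get_exactly_one([("row-a",)]) == ("row-a",)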
Example #2
    def _find_recordsets_with_records(
        self, context, table, cls,
        list_cls, exc_notfound, criterion,
        one=False, marker=None, limit=None, sort_key=None,
        sort_dir=None, query=None, apply_tenant_criteria=True,
        load_relations=False, relation_table=None, relation_cls=None,
            relation_list_cls=None, relation_not_found_exc=None):

        sort_key = sort_key or 'created_at'
        sort_dir = sort_dir or 'asc'

        # Join the 2 required tables
        rjoin = table.outerjoin(
            relation_table,
            relation_table.c.recordset_id == table.c.id)

        inner_q = select([table.c.id])

        if marker is not None:
            marker = utils.check_marker(table, marker, self.session)

        try:
            inner_q = utils.paginate_query(
                inner_q, table, limit,
                [sort_key, 'id'], marker=marker,
                sort_dir=sort_dir)

        except oslodb_utils.InvalidSortKey as sort_key_error:
            raise exceptions.InvalidSortKey(six.text_type(sort_key_error))
        # Any ValueErrors are propagated back to the user as is.
        # Limits, sort_dir and sort_key are checked at the API layer.
        # If however central or storage is called directly, invalid values
        # show up as ValueError
        except ValueError as value_error:
            raise exceptions.ValueError(six.text_type(value_error))

        inner_q = self._apply_criterion(table, inner_q, criterion)
        inner_q = self._apply_deleted_criteria(context, table, inner_q)

        # Get the list of IDs needed.
        # This is a separate call due to
        # http://dev.mysql.com/doc/mysql-reslimits-excerpt/5.6/en/subquery-restrictions.html  # noqa

        inner_rproxy = self.session.execute(inner_q)
        ids = inner_rproxy.fetchall()

        # formatted_ids = [id[0] for id in ids]
        formatted_ids = six.moves.map(operator.itemgetter(0), ids)

        query = select(
            [
                # RS Info
                table.c.id,                                 # 0 - RS ID
                table.c.version,                            # 1 - RS Version
                table.c.created_at,                         # 2 - RS Created
                table.c.updated_at,                         # 3 - RS Updated
                table.c.tenant_id,                          # 4 - RS Tenant
                table.c.domain_id,                          # 5 - RS Domain
                table.c.name,                               # 6 - RS Name
                table.c.type,                               # 7 - RS Type
                table.c.ttl,                                # 8 - RS TTL
                table.c.description,                        # 9 - RS Desc
                # R Info
                relation_table.c.id,                        # 10 - R ID
                relation_table.c.version,                   # 11 - R Version
                relation_table.c.created_at,                # 12 - R Created
                relation_table.c.updated_at,                # 13 - R Updated
                relation_table.c.tenant_id,                 # 14 - R Tenant
                relation_table.c.domain_id,                 # 15 - R Domain
                relation_table.c.recordset_id,              # 16 - R RSet
                relation_table.c.data,                      # 17 - R Data
                relation_table.c.description,               # 18 - R Desc
                relation_table.c.hash,                      # 19 - R Hash
                relation_table.c.managed,                   # 20 - R Mngd Flg
                relation_table.c.managed_plugin_name,       # 21 - R Mngd Plg
                relation_table.c.managed_resource_type,     # 22 - R Mngd Type
                relation_table.c.managed_resource_region,   # 23 - R Mngd Rgn
                relation_table.c.managed_resource_id,       # 24 - R Mngd ID
                relation_table.c.managed_tenant_id,         # 25 - R Mngd T ID
                relation_table.c.status,                    # 26 - R Status
                relation_table.c.action,                    # 27 - R Action
                relation_table.c.serial                     # 28 - R Serial
            ]).select_from(rjoin).where(table.c.id.in_(formatted_ids))

        # These maps make looking up column indexes in the raw rows much
        # easier and more maintainable

        rs_map = {
            "id": 0,
            "version": 1,
            "created_at": 2,
            "updated_at": 3,
            "tenant_id": 4,
            "domain_id": 5,
            "name": 6,
            "type": 7,
            "ttl": 8,
            "description": 9,
        }

        r_map = {
            "id": 10,
            "version": 11,
            "created_at": 12,
            "updated_at": 13,
            "tenant_id": 14,
            "domain_id": 15,
            "recordset_id": 16,
            "data": 17,
            "description": 18,
            "hash": 19,
            "managed": 20,
            "managed_plugin_name": 21,
            "managed_resource_type": 22,
            "managed_resource_region": 23,
            "managed_resource_id": 24,
            "managed_tenant_id": 25,
            "status": 26,
            "action": 27,
            "serial": 28,
        }

        query, sort_dirs = utils.sort_query(query, table, [sort_key, 'id'],
                                            sort_dir=sort_dir)

        try:
            resultproxy = self.session.execute(query)
            raw_rows = resultproxy.fetchall()

        # Any ValueErrors are propagated back to the user as is.
        # If however central or storage is called directly, invalid values
        # show up as ValueError
        except ValueError as value_error:
            raise exceptions.ValueError(six.text_type(value_error))

        rrsets = list_cls()
        rrset_id = None
        current_rrset = None

        for record in raw_rows:
            # If we're looking at the first, or a new rrset
            if record[0] != rrset_id:
                if current_rrset is not None:
                    # If this isn't the first iteration
                    rrsets.append(current_rrset)
                # Set up a new rrset
                current_rrset = cls()

                rrset_id = record[rs_map['id']]

                # Add all the loaded vars into RecordSet object

                for key, value in rs_map.items():
                    setattr(current_rrset, key, record[value])

                current_rrset.records = relation_list_cls()

                if record[r_map['id']] is not None:
                    rrdata = relation_cls()

                    for key, value in r_map.items():
                        setattr(rrdata, key, record[value])

                    current_rrset.records.append(rrdata)

            else:
                # We've already got an rrset, add the rdata
                if record[r_map['id']] is not None:
                    rrdata = relation_cls()

                    for key, value in r_map.items():
                        setattr(rrdata, key, record[value])

                    current_rrset.records.append(rrdata)

        # If the last record examined was a new rrset, or there is only 1 rrset
        if len(rrsets) == 0 or \
                (len(rrsets) != 0 and rrsets[-1] != current_rrset):
            if current_rrset is not None:
                rrsets.append(current_rrset)

        return rrsets
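
The loop above folds the flat join output (one row per record, with the
recordset columns repeated on every row) back into nested objects by watching
for a change in the recordset id, relying on the sorted query to keep each
recordset's rows together. A small, hypothetical sketch of the same grouping
over plain tuples and dicts, with a reduced field set chosen purely for
illustration:

    def group_rows(raw_rows):
        # Each row is (recordset_id, recordset_name, record_id, record_data);
        # the record columns are None when the outer join found no records.
        rrsets = []
        current = None
        current_id = None
        for rs_id, rs_name, r_id, r_data in raw_rows:
            if rs_id != current_id:
                # First row of a new recordset: start a fresh group.
                current = {"id": rs_id, "name": rs_name, "records": []}
                current_id = rs_id
                rrsets.append(current)
            if r_id is not None:
                # One record per joined row; empty recordsets contribute none.
                current["records"].append({"id": r_id, "data": r_data})
        return rrsets

    rows = [
        (1, "www", 10, "192.0.2.1"),
        (1, "www", 11, "192.0.2.2"),
        (2, "mail", None, None),
    ]
    assert [len(r["records"]) for r in group_rows(rows)] == [2, 0]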
Example #3
    def _find(
        self,
        context,
        table,
        cls,
        list_cls,
        exc_notfound,
        criterion,
        one=False,
        marker=None,
        limit=None,
        sort_key=None,
        sort_dir=None,
        query=None,
        apply_tenant_criteria=True,
    ):
        sort_key = sort_key or "created_at"
        sort_dir = sort_dir or "asc"

        # Build the query
        if query is None:
            query = select([table])
        query = self._apply_criterion(table, query, criterion)
        if apply_tenant_criteria:
            query = self._apply_tenant_criteria(context, table, query)
        query = self._apply_deleted_criteria(context, table, query)

        # Execute the Query
        if one:
            # NOTE(kiall): If we expect one value, and two rows match, we raise
            #              a NotFound. Limiting to 2 allows us to determine
            #              when we need to raise, while selecting the minimal
            #              number of rows.
            resultproxy = self.session.execute(query.limit(2))
            results = resultproxy.fetchall()

            if len(results) != 1:
                raise exc_notfound()
            else:
                return _set_object_from_model(cls(), results[0])
        else:
            if marker is not None:
                # If a marker was supplied, look up the matching row so it
                # can be used as the pagination marker.
                # Otherwise, return all matching records
                marker_query = select([table]).where(table.c.id == marker)

                try:
                    marker_resultproxy = self.session.execute(marker_query)
                    marker_row = marker_resultproxy.fetchone()
                    if marker_row is None:
                        raise exceptions.MarkerNotFound("Marker %s could not be found" % marker)
                    marker = marker_row
                except oslo_db_exception.DBError as e:
                    # Malformed UUIDs return StatementError wrapped in a
                    # DBError
                    if isinstance(e.inner_exception, sqlalchemy_exc.StatementError):
                        raise exceptions.InvalidMarker()
                    else:
                        raise

            try:
                query = utils.paginate_query(query, table, limit, [sort_key, "id"], marker=marker, sort_dir=sort_dir)

                resultproxy = self.session.execute(query)
                results = resultproxy.fetchall()

                return _set_listobject_from_models(list_cls(), results)
            except oslodb_utils.InvalidSortKey as sort_key_error:
                raise exceptions.InvalidSortKey(six.text_type(sort_key_error))
            # Any ValueErrors are propagated back to the user as is.
            # Limits, sort_dir and sort_key are checked at the API layer.
            # If however central or storage is called directly, invalid values
            # show up as ValueError
            except ValueError as value_error:
                raise exceptions.ValueError(six.text_type(value_error))
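
utils.paginate_query, which raises oslodb_utils.InvalidSortKey and so appears
to wrap oslo.db's keyset-pagination helper, orders results by [sort_key, 'id']
and returns only the rows after the marker row, up to limit. A rough,
pure-Python sketch of that idea over a list of dicts; paginate, rows and
marker_id are illustrative names, not the real helper:

    def paginate(rows, limit, sort_key, marker_id=None):
        # Order by (sort_key, id) so the ordering is total and stable,
        # as the [sort_key, 'id'] key list above intends.
        ordered = sorted(rows, key=lambda r: (r[sort_key], r["id"]))
        start = 0
        if marker_id is not None:
            # Resume immediately after the marker row.
            ids = [r["id"] for r in ordered]
            start = ids.index(marker_id) + 1
        return ordered[start:start + limit] if limit else ordered[start:]

    rows = [{"id": i, "created_at": i % 3} for i in range(6)]
    page = paginate(rows, limit=2, sort_key="created_at", marker_id=3)
    assert [r["id"] for r in page] == [1, 4]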
Example #4
    def _find(self, context, table, cls, list_cls, exc_notfound, criterion,
              one=False, marker=None, limit=None, sort_key=None,
              sort_dir=None, query=None, apply_tenant_criteria=True):
        sort_key = sort_key or 'created_at'
        sort_dir = sort_dir or 'asc'

        # Build the query
        if query is None:
            query = select([table])
        query = self._apply_criterion(table, query, criterion)
        if apply_tenant_criteria:
            query = self._apply_tenant_criteria(context, table, query)
        query = self._apply_deleted_criteria(context, table, query)

        # Execute the Query
        if one:
            # NOTE(kiall): If we expect one value, and two rows match, we raise
            #              a NotFound. Limiting to 2 allows us to determine
            #              when we need to raise, while selecting the minimal
            #              number of rows.
            resultproxy = self.session.execute(query.limit(2))
            results = resultproxy.fetchall()

            if len(results) != 1:
                raise exc_notfound()
            else:
                return _set_object_from_model(cls(), results[0])
        else:
            if marker is not None:
                # If a marker was supplied, look up the matching row so it
                # can be used as the pagination marker.
                # Otherwise, return all matching records
                marker_query = select([table]).where(table.c.id == marker)

                try:
                    marker_resultproxy = self.session.execute(marker_query)
                    marker_row = marker_resultproxy.fetchone()
                    if marker_row is None:
                        raise exceptions.MarkerNotFound(
                            'Marker %s could not be found' % marker)
                    marker = marker_row
                except oslo_db_exception.DBError as e:
                    # Malformed UUIDs return StatementError wrapped in a
                    # DBError
                    if isinstance(e.inner_exception,
                                  sqlalchemy_exc.StatementError):
                        raise exceptions.InvalidMarker()
                    else:
                        raise

            try:
                query = utils.paginate_query(
                    query, table, limit,
                    [sort_key, 'id', 'created_at'], marker=marker,
                    sort_dir=sort_dir)

                resultproxy = self.session.execute(query)
                results = resultproxy.fetchall()

                return _set_listobject_from_models(list_cls(), results)
            except oslodb_utils.InvalidSortKey as sort_key_error:
                raise exceptions.InvalidSortKey(six.text_type(sort_key_error))
            # Any ValueErrors are propagated back to the user as is.
            # Limits, sort_dir and sort_key are checked at the API layer.
            # If however central or storage is called directly, invalid values
            # show up as ValueError
            except ValueError as value_error:
                raise exceptions.ValueError(six.text_type(value_error))
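
Both pagination branches use the same translation pattern: low-level errors
from the pagination helper (InvalidSortKey, ValueError) are re-raised as the
storage layer's own exception types so callers never see driver- or
helper-level classes. A generic, hypothetical sketch of that wrapping, where
StorageError and parse_limit are illustrative names rather than anything from
the code above:

    class StorageError(Exception):
        """Stand-in for the storage layer's exceptions module."""

    def parse_limit(value):
        # Invalid limits reach this layer as ValueError when central or
        # storage is called directly; translate and keep the message.
        try:
            return int(value)
        except ValueError as value_error:
            raise StorageError(str(value_error))

    try:
        parse_limit("not-a-number")
    except StorageError as exc:
        assert "not-a-number" in str(exc)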
Example #5
    def _find_recordsets_with_records(self,
                                      context,
                                      criterion,
                                      zones_table,
                                      recordsets_table,
                                      records_table,
                                      one=False,
                                      marker=None,
                                      limit=None,
                                      sort_key=None,
                                      sort_dir=None,
                                      query=None,
                                      apply_tenant_criteria=True,
                                      force_index=False):

        sort_key = sort_key or 'created_at'
        sort_dir = sort_dir or 'asc'
        data = criterion.pop('data', None)
        status = criterion.pop('status', None)
        filtering_records = data or status

        # sort key will be used for the ORDER BY key in query,
        # needs to use the correct table index for different sort keys
        index_hint = utils.get_rrset_index(sort_key) if force_index else None

        rzjoin = recordsets_table.join(
            zones_table, recordsets_table.c.zone_id == zones_table.c.id)

        if filtering_records:
            rzjoin = rzjoin.join(
                records_table,
                recordsets_table.c.id == records_table.c.recordset_id)

        inner_q = select([recordsets_table.c.id,      # 0 - RS ID
                          zones_table.c.name]         # 1 - ZONE NAME
                         ).select_from(rzjoin).\
            where(zones_table.c.deleted == '0')

        count_q = select([func.count(distinct(recordsets_table.c.id))]).\
            select_from(rzjoin).where(zones_table.c.deleted == '0')

        if index_hint:
            inner_q = inner_q.with_hint(recordsets_table, index_hint)

        if marker is not None:
            marker = utils.check_marker(recordsets_table, marker, self.session)

        try:
            inner_q = utils.paginate_query(inner_q,
                                           recordsets_table,
                                           limit, [sort_key, 'id'],
                                           marker=marker,
                                           sort_dir=sort_dir)

        except oslodb_utils.InvalidSortKey as sort_key_error:
            raise exceptions.InvalidSortKey(six.text_type(sort_key_error))
        # Any ValueErrors are propagated back to the user as is.
        # Limits, sort_dir and sort_key are checked at the API layer.
        # If however central or storage is called directly, invalid values
        # show up as ValueError
        except ValueError as value_error:
            raise exceptions.ValueError(six.text_type(value_error))

        if apply_tenant_criteria:
            inner_q = self._apply_tenant_criteria(context,
                                                  recordsets_table,
                                                  inner_q,
                                                  include_null_tenant=False)
            count_q = self._apply_tenant_criteria(context,
                                                  recordsets_table,
                                                  count_q,
                                                  include_null_tenant=False)

        inner_q = self._apply_criterion(recordsets_table, inner_q, criterion)
        count_q = self._apply_criterion(recordsets_table, count_q, criterion)

        if filtering_records:
            records_criterion = dict(
                (k, v) for k, v in (('data', data), ('status', status))
                if v is not None)
            inner_q = self._apply_criterion(records_table, inner_q,
                                            records_criterion)
            count_q = self._apply_criterion(records_table, count_q,
                                            records_criterion)

        inner_q = self._apply_deleted_criteria(context, recordsets_table,
                                               inner_q)
        count_q = self._apply_deleted_criteria(context, recordsets_table,
                                               count_q)

        # Get the list of IDs needed.
        # This is a separate call due to
        # http://dev.mysql.com/doc/mysql-reslimits-excerpt/5.6/en/subquery-restrictions.html  # noqa

        inner_rproxy = self.session.execute(inner_q)
        rows = inner_rproxy.fetchall()
        if len(rows) == 0:
            return 0, objects.RecordSetList()
        id_zname_map = {}
        for r in rows:
            id_zname_map[r[0]] = r[1]
        formatted_ids = six.moves.map(operator.itemgetter(0), rows)

        # The count query does not scale well for large numbers of recordsets,
        # so skip it if the header 'OpenStack-DNS-Hide-Counts: True' is present
        if context.hide_counts:
            total_count = None
        else:
            resultproxy = self.session.execute(count_q)
            result = resultproxy.fetchone()
            total_count = 0 if result is None else result[0]

        # Join the 2 required tables
        rjoin = recordsets_table.outerjoin(
            records_table,
            records_table.c.recordset_id == recordsets_table.c.id)

        query = select([
            # RS Info
            recordsets_table.c.id,  # 0 - RS ID
            recordsets_table.c.version,  # 1 - RS Version
            recordsets_table.c.created_at,  # 2 - RS Created
            recordsets_table.c.updated_at,  # 3 - RS Updated
            recordsets_table.c.tenant_id,  # 4 - RS Tenant
            recordsets_table.c.zone_id,  # 5 - RS Zone
            recordsets_table.c.name,  # 6 - RS Name
            recordsets_table.c.type,  # 7 - RS Type
            recordsets_table.c.ttl,  # 8 - RS TTL
            recordsets_table.c.description,  # 9 - RS Desc
            # R Info
            records_table.c.id,  # 10 - R ID
            records_table.c.version,  # 11 - R Version
            records_table.c.created_at,  # 12 - R Created
            records_table.c.updated_at,  # 13 - R Updated
            records_table.c.tenant_id,  # 14 - R Tenant
            records_table.c.zone_id,  # 15 - R Zone
            records_table.c.recordset_id,  # 16 - R RSet
            records_table.c.data,  # 17 - R Data
            records_table.c.description,  # 18 - R Desc
            records_table.c.hash,  # 19 - R Hash
            records_table.c.managed,  # 20 - R Mngd Flg
            records_table.c.managed_plugin_name,  # 21 - R Mngd Plg
            records_table.c.managed_resource_type,  # 22 - R Mngd Type
            records_table.c.managed_resource_region,  # 23 - R Mngd Rgn
            records_table.c.managed_resource_id,  # 24 - R Mngd ID
            records_table.c.managed_tenant_id,  # 25 - R Mngd T ID
            records_table.c.status,  # 26 - R Status
            records_table.c.action,  # 27 - R Action
            records_table.c.serial  # 28 - R Serial
        ]).select_from(rjoin)

        query = query.where(recordsets_table.c.id.in_(formatted_ids))

        # These maps make looking up column indexes in the raw rows much
        # easier and more maintainable

        rs_map = {
            "id": 0,
            "version": 1,
            "created_at": 2,
            "updated_at": 3,
            "tenant_id": 4,
            "zone_id": 5,
            "name": 6,
            "type": 7,
            "ttl": 8,
            "description": 9,
        }

        r_map = {
            "id": 10,
            "version": 11,
            "created_at": 12,
            "updated_at": 13,
            "tenant_id": 14,
            "zone_id": 15,
            "recordset_id": 16,
            "data": 17,
            "description": 18,
            "hash": 19,
            "managed": 20,
            "managed_plugin_name": 21,
            "managed_resource_type": 22,
            "managed_resource_region": 23,
            "managed_resource_id": 24,
            "managed_tenant_id": 25,
            "status": 26,
            "action": 27,
            "serial": 28,
        }

        query, sort_dirs = utils.sort_query(query,
                                            recordsets_table, [sort_key, 'id'],
                                            sort_dir=sort_dir)

        try:
            resultproxy = self.session.execute(query)
            raw_rows = resultproxy.fetchall()

        # Any ValueErrors are propagated back to the user as is.
        # If however central or storage is called directly, invalid values
        # show up as ValueError
        except ValueError as value_error:
            raise exceptions.ValueError(six.text_type(value_error))

        rrsets = objects.RecordSetList()
        rrset_id = None
        current_rrset = None

        for record in raw_rows:
            # If we're looking at the first, or a new rrset
            if record[0] != rrset_id:
                if current_rrset is not None:
                    # If this isn't the first iteration
                    rrsets.append(current_rrset)
                # Set up a new rrset
                current_rrset = objects.RecordSet()

                rrset_id = record[rs_map['id']]

                # Add all the loaded vars into RecordSet object

                for key, value in rs_map.items():
                    setattr(current_rrset, key, record[value])

                current_rrset.zone_name = id_zname_map[current_rrset.id]
                current_rrset.obj_reset_changes(['zone_name'])

                current_rrset.records = objects.RecordList()

                if record[r_map['id']] is not None:
                    rrdata = objects.Record()

                    for key, value in r_map.items():
                        setattr(rrdata, key, record[value])

                    current_rrset.records.append(rrdata)

            else:
                # We've already got an rrset, add the rdata
                if record[r_map['id']] is not None:
                    rrdata = objects.Record()

                    for key, value in r_map.items():
                        setattr(rrdata, key, record[value])

                    current_rrset.records.append(rrdata)

        # If the last record examined was a new rrset, or there is only 1 rrset
        if len(rrsets) == 0 or \
                (len(rrsets) != 0 and rrsets[-1] != current_rrset):
            if current_rrset is not None:
                rrsets.append(current_rrset)

        return total_count, rrsets
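
The comment above explains why the ids are fetched in a separate round trip:
per the linked MySQL subquery-restrictions page, MySQL cannot apply LIMIT
inside an IN (...) subquery, so the paginated inner query runs first and its
ids feed a second query. A self-contained sketch of that two-step pattern
using the standard-library sqlite3 module; the table and column names here
are made up for the illustration:

    import sqlite3

    conn = sqlite3.connect(":memory:")
    conn.executescript("""
        CREATE TABLE recordsets (id INTEGER PRIMARY KEY, name TEXT);
        CREATE TABLE records (id INTEGER PRIMARY KEY, recordset_id INTEGER,
                              data TEXT);
        INSERT INTO recordsets VALUES (1, 'www'), (2, 'mail'), (3, 'ftp');
        INSERT INTO records VALUES (10, 1, '192.0.2.1'), (11, 1, '192.0.2.2'),
                                   (20, 2, '198.51.100.1');
    """)

    # Step 1: the paginated "inner" query returns only the recordset ids.
    ids = [row[0] for row in
           conn.execute("SELECT id FROM recordsets ORDER BY id LIMIT 2")]

    # Step 2: fetch the joined rows for exactly those ids via IN (...).
    placeholders = ", ".join("?" for _ in ids)
    rows = conn.execute(
        "SELECT rs.id, rs.name, r.id, r.data "
        "FROM recordsets rs LEFT OUTER JOIN records r "
        "ON r.recordset_id = rs.id "
        "WHERE rs.id IN (%s) ORDER BY rs.id" % placeholders,
        ids).fetchall()

    assert [row[0] for row in rows] == [1, 1, 2]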
Example #6
    def _find_recordsets_with_records(self, context, criterion, zones_table,
                                      recordsets_table, records_table,
                                      one=False, marker=None, limit=None,
                                      sort_key=None, sort_dir=None, query=None,
                                      apply_tenant_criteria=True):

        sort_key = sort_key or 'created_at'
        sort_dir = sort_dir or 'asc'
        data = criterion.pop('data', None)
        status = criterion.pop('status', None)
        filtering_records = data or status

        rzjoin = recordsets_table.join(
                zones_table,
                recordsets_table.c.zone_id == zones_table.c.id)

        if filtering_records:
            rzjoin = rzjoin.join(
                    records_table,
                    recordsets_table.c.id == records_table.c.recordset_id)

        inner_q = select([recordsets_table.c.id,      # 0 - RS ID
                          zones_table.c.name]         # 1 - ZONE NAME
                         ).select_from(rzjoin).\
            where(zones_table.c.deleted == '0')
        count_q = select([func.count(distinct(recordsets_table.c.id))]).\
            select_from(rzjoin).where(zones_table.c.deleted == '0')

        if marker is not None:
            marker = utils.check_marker(recordsets_table, marker,
                                        self.session)

        try:
            inner_q = utils.paginate_query(
                inner_q, recordsets_table, limit,
                [sort_key, 'id'], marker=marker,
                sort_dir=sort_dir)

        except oslodb_utils.InvalidSortKey as sort_key_error:
            raise exceptions.InvalidSortKey(six.text_type(sort_key_error))
        # Any ValueErrors are propagated back to the user as is.
        # Limits, sort_dir and sort_key are checked at the API layer.
        # If however central or storage is called directly, invalid values
        # show up as ValueError
        except ValueError as value_error:
            raise exceptions.ValueError(six.text_type(value_error))

        if apply_tenant_criteria:
            inner_q = self._apply_tenant_criteria(context, recordsets_table,
                                                  inner_q)
            count_q = self._apply_tenant_criteria(context, recordsets_table,
                                                  count_q)

        inner_q = self._apply_criterion(recordsets_table, inner_q, criterion)
        count_q = self._apply_criterion(recordsets_table, count_q, criterion)

        if filtering_records:
            records_criterion = dict((k, v) for k, v in (
                ('data', data), ('status', status)) if v is not None)
            inner_q = self._apply_criterion(records_table, inner_q,
                                            records_criterion)
            count_q = self._apply_criterion(records_table, count_q,
                                            records_criterion)

        inner_q = self._apply_deleted_criteria(context, recordsets_table,
                                               inner_q)
        count_q = self._apply_deleted_criteria(context, recordsets_table,
                                               count_q)

        # Get the list of IDs needed.
        # This is a separate call due to
        # http://dev.mysql.com/doc/mysql-reslimits-excerpt/5.6/en/subquery-restrictions.html  # noqa

        inner_rproxy = self.session.execute(inner_q)
        rows = inner_rproxy.fetchall()
        if len(rows) == 0:
            return 0, objects.RecordSetList()
        id_zname_map = {}
        for r in rows:
            id_zname_map[r[0]] = r[1]
        formatted_ids = six.moves.map(operator.itemgetter(0), rows)

        resultproxy = self.session.execute(count_q)
        result = resultproxy.fetchone()
        total_count = 0 if result is None else result[0]

        # Join the 2 required tables
        rjoin = recordsets_table.outerjoin(
            records_table,
            records_table.c.recordset_id == recordsets_table.c.id)

        query = select(
            [
                # RS Info
                recordsets_table.c.id,                     # 0 - RS ID
                recordsets_table.c.version,                # 1 - RS Version
                recordsets_table.c.created_at,             # 2 - RS Created
                recordsets_table.c.updated_at,             # 3 - RS Updated
                recordsets_table.c.tenant_id,              # 4 - RS Tenant
                recordsets_table.c.zone_id,                # 5 - RS Zone
                recordsets_table.c.name,                   # 6 - RS Name
                recordsets_table.c.type,                   # 7 - RS Type
                recordsets_table.c.ttl,                    # 8 - RS TTL
                recordsets_table.c.description,            # 9 - RS Desc
                # R Info
                records_table.c.id,                        # 10 - R ID
                records_table.c.version,                   # 11 - R Version
                records_table.c.created_at,                # 12 - R Created
                records_table.c.updated_at,                # 13 - R Updated
                records_table.c.tenant_id,                 # 14 - R Tenant
                records_table.c.zone_id,                   # 15 - R Zone
                records_table.c.recordset_id,              # 16 - R RSet
                records_table.c.data,                      # 17 - R Data
                records_table.c.description,               # 18 - R Desc
                records_table.c.hash,                      # 19 - R Hash
                records_table.c.managed,                   # 20 - R Mngd Flg
                records_table.c.managed_plugin_name,       # 21 - R Mngd Plg
                records_table.c.managed_resource_type,     # 22 - R Mngd Type
                records_table.c.managed_resource_region,   # 23 - R Mngd Rgn
                records_table.c.managed_resource_id,       # 24 - R Mngd ID
                records_table.c.managed_tenant_id,         # 25 - R Mngd T ID
                records_table.c.status,                    # 26 - R Status
                records_table.c.action,                    # 27 - R Action
                records_table.c.serial                     # 28 - R Serial
            ]).select_from(rjoin)

        query = query.where(
            recordsets_table.c.id.in_(formatted_ids)
        )

        # These maps make looking up column indexes in the raw rows much
        # easier and more maintainable

        rs_map = {
            "id": 0,
            "version": 1,
            "created_at": 2,
            "updated_at": 3,
            "tenant_id": 4,
            "zone_id": 5,
            "name": 6,
            "type": 7,
            "ttl": 8,
            "description": 9,
        }

        r_map = {
            "id": 10,
            "version": 11,
            "created_at": 12,
            "updated_at": 13,
            "tenant_id": 14,
            "zone_id": 15,
            "recordset_id": 16,
            "data": 17,
            "description": 18,
            "hash": 19,
            "managed": 20,
            "managed_plugin_name": 21,
            "managed_resource_type": 22,
            "managed_resource_region": 23,
            "managed_resource_id": 24,
            "managed_tenant_id": 25,
            "status": 26,
            "action": 27,
            "serial": 28,
        }

        query, sort_dirs = utils.sort_query(query, recordsets_table,
                                            [sort_key, 'id'],
                                            sort_dir=sort_dir)

        try:
            resultproxy = self.session.execute(query)
            raw_rows = resultproxy.fetchall()

        # Any ValueErrors are propagated back to the user as is.
        # If however central or storage is called directly, invalid values
        # show up as ValueError
        except ValueError as value_error:
            raise exceptions.ValueError(six.text_type(value_error))

        rrsets = objects.RecordSetList()
        rrset_id = None
        current_rrset = None

        for record in raw_rows:
            # If we're looking at the first, or a new rrset
            if record[0] != rrset_id:
                if current_rrset is not None:
                    # If this isn't the first iteration
                    rrsets.append(current_rrset)
                # Set up a new rrset
                current_rrset = objects.RecordSet()

                rrset_id = record[rs_map['id']]

                # Add all the loaded vars into RecordSet object

                for key, value in rs_map.items():
                    setattr(current_rrset, key, record[value])

                current_rrset.zone_name = id_zname_map[current_rrset.id]
                current_rrset.obj_reset_changes(['zone_name'])

                current_rrset.records = objects.RecordList()

                if record[r_map['id']] is not None:
                    rrdata = objects.Record()

                    for key, value in r_map.items():
                        setattr(rrdata, key, record[value])

                    current_rrset.records.append(rrdata)

            else:
                # We've already got an rrset, add the rdata
                if record[r_map['id']] is not None:
                    rrdata = objects.Record()

                    for key, value in r_map.items():
                        setattr(rrdata, key, record[value])

                    current_rrset.records.append(rrdata)

        # If the last record examined was a new rrset, or there is only 1 rrset
        if len(rrsets) == 0 or \
                (len(rrsets) != 0 and rrsets[-1] != current_rrset):
            if current_rrset is not None:
                rrsets.append(current_rrset)

        return total_count, rrsets
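
The rs_map / r_map dictionaries exist because the rows come back as plain
positional tuples; mapping column positions to attribute names keeps the
setattr loops readable and in one place. A tiny, hypothetical sketch of that
technique with a generic container object; Row and RS_MAP are illustrative
stand-ins, not the objects.RecordSet class used above:

    class Row(object):
        """A bare attribute container, standing in for an objects.* class."""

    RS_MAP = {"id": 0, "name": 1, "ttl": 2}

    def row_to_object(raw_row, column_map, obj_cls=Row):
        # Copy each mapped column of the positional row onto a named
        # attribute, exactly as the rs_map/r_map loops above do.
        obj = obj_cls()
        for attr, position in column_map.items():
            setattr(obj, attr, raw_row[position])
        return obj

    rrset = row_to_object(("rs-1", "www.example.org.", 300), RS_MAP)
    assert (rrset.id, rrset.ttl) == ("rs-1", 300)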