Code example #1
File: search.py  Project: Rome84/AWS-BOTO1
    def __init__(self, domain=None, endpoint=None):
        self.domain = domain
        self.endpoint = endpoint
        self.session = requests.Session()

        # Endpoint needs to be set before initializing CloudSearchDomainConnection
        if not endpoint:
            self.endpoint = domain.search_service_endpoint

        # Copy proxy settings from connection and check if request should be signed
        self.sign_request = False
        if self.domain and self.domain.layer1:
            if self.domain.layer1.use_proxy:
                self.session.proxies['http'] = self.domain.layer1.get_proxy_url_with_auth()

            self.sign_request = getattr(self.domain.layer1, 'sign_request', False)

            if self.sign_request:
                layer1 = self.domain.layer1
                self.domain_connection = CloudSearchDomainConnection(
                    host=self.endpoint,
                    aws_access_key_id=layer1.aws_access_key_id,
                    aws_secret_access_key=layer1.aws_secret_access_key,
                    region=layer1.region,
                    provider=layer1.provider
                )
Code example #2
    def __init__(self, domain=None, endpoint=None):
        self.domain = domain
        self.endpoint = endpoint
        if not self.endpoint:
            self.endpoint = domain.doc_service_endpoint
        self.documents_batch = []
        self._sdf = None

        # Copy proxy settings from connection and check if request should be signed
        self.proxy = {}
        self.sign_request = False
        if self.domain and self.domain.layer1:
            if self.domain.layer1.use_proxy:
                self.proxy = {
                    'http': self.domain.layer1.get_proxy_url_with_auth()
                }

            self.sign_request = getattr(self.domain.layer1, 'sign_request',
                                        False)

            if self.sign_request:
                # Create a domain connection to send signed requests
                layer1 = self.domain.layer1
                self.domain_connection = CloudSearchDomainConnection(
                    host=self.endpoint,
                    aws_access_key_id=layer1.aws_access_key_id,
                    aws_secret_access_key=layer1.aws_secret_access_key,
                    region=layer1.region,
                    provider=layer1.provider)
Code example #3
File: document.py  Project: 10sr/hue
    def __init__(self, domain=None, endpoint=None):
        self.domain = domain
        self.endpoint = endpoint
        if not self.endpoint:
            self.endpoint = domain.doc_service_endpoint
        self.documents_batch = []
        self._sdf = None

        # Copy proxy settings from connection and check if request should be signed
        self.proxy = {}
        self.sign_request = False
        if self.domain and self.domain.layer1:
            if self.domain.layer1.use_proxy:
                self.proxy = {'http': self.domain.layer1.get_proxy_url_with_auth()}

            self.sign_request = getattr(self.domain.layer1, 'sign_request', False)

            if self.sign_request:
                # Create a domain connection to send signed requests
                layer1 = self.domain.layer1
                self.domain_connection = CloudSearchDomainConnection(
                    host=self.endpoint,
                    aws_access_key_id=layer1.aws_access_key_id,
                    aws_secret_access_key=layer1.aws_secret_access_key,
                    region=layer1.region,
                    provider=layer1.provider
                )
Code example #4
File: search.py  Project: 10sr/hue
    def __init__(self, domain=None, endpoint=None):
        self.domain = domain
        self.endpoint = endpoint
        self.session = requests.Session()

        # Endpoint needs to be set before initializing CloudSearchDomainConnection
        if not endpoint:
            self.endpoint = domain.search_service_endpoint

        # Copy proxy settings from connection and check if request should be signed
        self.sign_request = False
        if self.domain and self.domain.layer1:
            if self.domain.layer1.use_proxy:
                self.session.proxies['http'] = self.domain.layer1.get_proxy_url_with_auth()

            self.sign_request = getattr(self.domain.layer1, 'sign_request', False)

            if self.sign_request:
                layer1 = self.domain.layer1
                self.domain_connection = CloudSearchDomainConnection(
                    host=self.endpoint,
                    aws_access_key_id=layer1.aws_access_key_id,
                    aws_secret_access_key=layer1.aws_secret_access_key,
                    region=layer1.region,
                    provider=layer1.provider
                )
Code example #5
# Imports assumed for this excerpt (not shown in the original snippet):
import json
from time import sleep

import boto.cloudsearch2
from boto.cloudsearchdomain.layer1 import CloudSearchDomainConnection


def delete_doc_in_cloudsearch(doc_ids,
                              cloudsearch_domain_name,
                              validate=True,
                              region='ap-southeast-2'):
    """

    :param doc_ids: the doc id array
    :param cloudsearch_domain_name: the domain name

    !!! This method is in-efficient for validating.
    """

    conn = boto.cloudsearch2.connect_to_region(region)
    domain = conn.describe_domains([cloudsearch_domain_name]) \
        ['DescribeDomainsResponse']['DescribeDomainsResult']['DomainStatusList']
    doc_service = CloudSearchDomainConnection(
        host=domain[0]['DocService']['Endpoint'], region=region)
    if len(doc_ids) > 0:
        batch = []
        for id in doc_ids:
            batch.append({"id": str(id), "type": "delete"})
        doc_service.upload_documents(json.dumps(batch), 'application/json')

    if validate:
        # CloudSearch needs some time to apply the deletion.
        MAX_RETRIES = 100
        SLEEP_GAP = 10
        for id in doc_ids:
            retry = 0
            while retry < MAX_RETRIES:
                result = doc_service.search(query="(term field=_id '%s')" % id,
                                            query_parser='structured')
                if (result['hits']['found'] == 0):
                    break
                retry += 1
                sleep(SLEEP_GAP)
            if retry >= MAX_RETRIES:
                raise ValueError(
                    "Intended to delete CloudSearch documents with ids %s, "
                    "but document %s still exists after deletion." % (doc_ids, id))
Code example #6
def connect_cloudsearchdomain(aws_access_key_id=None,
                              aws_secret_access_key=None,
                              **kwargs):
    """
    :type aws_access_key_id: string
    :param aws_access_key_id: Your AWS Access Key ID

    :type aws_secret_access_key: string
    :param aws_secret_access_key: Your AWS Secret Access Key

    :rtype: :class:`boto.cloudsearchdomain.layer1.CloudSearchDomainConnection`
    :return: A connection to Amazon's CloudSearch Domain service
    """
    from boto.cloudsearchdomain.layer1 import CloudSearchDomainConnection
    return CloudSearchDomainConnection(aws_access_key_id,
                                       aws_secret_access_key, **kwargs)
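
A minimal usage sketch for this factory function; the endpoint below is a placeholder, and the host keyword is needed because CloudSearchDomainConnection raises a ValueError when no host is given (see code example #9).

# A sketch, not taken from any project above; the endpoint is a placeholder.
conn = connect_cloudsearchdomain(
    aws_access_key_id='<access key>',
    aws_secret_access_key='<secret key>',
    host='search-mydomain-abc123.ap-southeast-2.cloudsearch.amazonaws.com')
results = conn.search('tim', query_parser='simple', size=10)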
Code example #7
File: search.py  Project: Rome84/AWS-BOTO1
class SearchConnection(object):

    def __init__(self, domain=None, endpoint=None):
        self.domain = domain
        self.endpoint = endpoint
        self.session = requests.Session()

        # Endpoint needs to be set before initializing CloudSearchDomainConnection
        if not endpoint:
            self.endpoint = domain.search_service_endpoint

        # Copy proxy settings from connection and check if request should be signed
        self.sign_request = False
        if self.domain and self.domain.layer1:
            if self.domain.layer1.use_proxy:
                self.session.proxies['http'] = self.domain.layer1.get_proxy_url_with_auth()

            self.sign_request = getattr(self.domain.layer1, 'sign_request', False)

            if self.sign_request:
                layer1 = self.domain.layer1
                self.domain_connection = CloudSearchDomainConnection(
                    host=self.endpoint,
                    aws_access_key_id=layer1.aws_access_key_id,
                    aws_secret_access_key=layer1.aws_secret_access_key,
                    region=layer1.region,
                    provider=layer1.provider
                )

    def build_query(self, q=None, parser=None, fq=None, rank=None, return_fields=None,
                    size=10, start=0, facet=None, highlight=None, sort=None,
                    partial=None, options=None):
        return Query(q=q, parser=parser, fq=fq, expr=rank, return_fields=return_fields,
                     size=size, start=start, facet=facet, highlight=highlight,
                     sort=sort, partial=partial, options=options)

    def search(self, q=None, parser=None, fq=None, rank=None, return_fields=None,
               size=10, start=0, facet=None, highlight=None, sort=None, partial=None,
               options=None):
        """
        Send a query to CloudSearch

        Each search query should use at least the q argument to specify
        the search string. The other options are used to specify the
        criteria of the search.

        :type q: string
        :param q: A string to search the default search fields for.

        :type parser: string
        :param parser: The parser to use. 'simple', 'structured', 'lucene', 'dismax'

        :type fq: string
        :param fq: The filter query to use.

        :type sort: List of strings
        :param sort: A list of fields or rank expressions used to order the
            search results. Order is handled by adding 'desc' or 'asc' after the field name.
            ``['year desc', 'author asc']``

        :type return_fields: List of strings
        :param return_fields: A list of fields which should be returned by the
            search. If this field is not specified, only IDs will be returned.
            ``['headline']``

        :type size: int
        :param size: Number of search results to return

        :type start: int
        :param start: Offset of the first search result to return (can be used
            for paging)

        :type facet: dict
        :param facet: Dictionary of fields for which facets should be returned.
            The facet value is a string of JSON options
            ``{'year': '{sort:"bucket", size:3}', 'genres': '{buckets:["Action","Adventure","Sci-Fi"]}'}``

        :type highlight: dict
        :param highlight: Dictionary of fields for which highlights should be returned.
            The highlight value is a string of JSON options
            ``{'genres': '{format:'text',max_phrases:2,pre_tag:'<b>',post_tag:'</b>'}'}``

        :type partial: bool
        :param partial: Should partial results from a partitioned service be returned
            if one or more index partitions are unreachable?

        :type options: str
        :param options: Options for the query parser specified in *parser*.
            Specified as a string in JSON format.
            ``{fields: ['title^5', 'description']}``

        :rtype: :class:`boto.cloudsearch2.search.SearchResults`
        :return: Returns the results of this search

        The following examples all assume we have indexed a set of documents
        with fields: *author*, *date*, *headline*

        A simple search will look for documents whose default text search
        fields will contain the search word exactly:

        >>> search(q='Tim') # Return documents with the word Tim in them (but not Timothy)

        A simple search with more keywords will return documents whose default
        text search fields contain the search strings together or separately.

        >>> search(q='Tim apple') # Will match "tim" and "apple"

        More complex searches require the boolean search operator.

        Wildcard searches can be used to search for any words that start with
        the search string.

        >>> search(q="'Tim*'") # Return documents with words like Tim or Timothy

        Search terms can also be combined. Allowed operators are "and", "or",
        "not", "field", "optional", "token", "phrase", or "filter"

        >>> search(q="(and 'Tim' (field author 'John Smith'))", parser='structured')

        Facets allow you to show classification information about the search
        results. For example, you can retrieve the authors who have written
        about Tim with a max of 3

        >>> search(q='Tim', facet={'Author': '{sort:"bucket", size:3}'})
        """

        query = self.build_query(q=q, parser=parser, fq=fq, rank=rank,
                                 return_fields=return_fields,
                                 size=size, start=start, facet=facet,
                                 highlight=highlight, sort=sort,
                                 partial=partial, options=options)
        return self(query)

    def _search_with_auth(self, params):
        return self.domain_connection.search(params.pop("q", ""), **params)

    def _search_without_auth(self, params, api_version):
        url = "http://%s/%s/search" % (self.endpoint, api_version)
        resp = self.session.get(url, params=params)

        return {'body': resp.content.decode('utf-8'), 'status_code': resp.status_code}

    def __call__(self, query):
        """Make a call to CloudSearch

        :type query: :class:`boto.cloudsearch2.search.Query`
        :param query: A group of search criteria

        :rtype: :class:`boto.cloudsearch2.search.SearchResults`
        :return: search results
        """
        api_version = '2013-01-01'
        if self.domain and self.domain.layer1:
            api_version = self.domain.layer1.APIVersion

        if self.sign_request:
            data = self._search_with_auth(query.to_domain_connection_params())
        else:
            r = self._search_without_auth(query.to_params(), api_version)

            _body = r['body']
            _status_code = r['status_code']

            try:
                data = json.loads(_body)
            except ValueError:
                if _status_code == 403:
                    msg = ''
                    import re
                    g = re.search('<html><body><h1>403 Forbidden</h1>([^<]+)<', _body)
                    try:
                        msg = ': %s' % (g.groups()[0].strip())
                    except AttributeError:
                        pass
                    raise SearchServiceException('Authentication error from Amazon%s' % msg)
                raise SearchServiceException("Got non-json response from Amazon. %s" % _body, query)

        if 'messages' in data and 'error' in data:
            for m in data['messages']:
                if m['severity'] == 'fatal':
                    raise SearchServiceException("Error processing search %s "
                        "=> %s" % (params, m['message']), query)
        elif 'error' in data:
            raise SearchServiceException("Unknown error processing search %s"
                % json.dumps(data), query)

        data['query'] = query
        data['search_service'] = self

        return SearchResults(**data)

    def get_all_paged(self, query, per_page):
        """Get a generator to iterate over all pages of search results

        :type query: :class:`boto.cloudsearch2.search.Query`
        :param query: A group of search criteria

        :type per_page: int
        :param per_page: Number of docs in each :class:`boto.cloudsearch2.search.SearchResults` object.

        :rtype: generator
        :return: Generator containing :class:`boto.cloudsearch2.search.SearchResults`
        """
        query.update_size(per_page)
        page = 0
        num_pages_needed = 0
        while page <= num_pages_needed:
            results = self(query)
            num_pages_needed = results.num_pages_needed
            yield results
            query.start += query.real_size
            page += 1

    def get_all_hits(self, query):
        """Get a generator to iterate over all search results

        Transparently handles the results paging from Cloudsearch
        search results so even if you have many thousands of results
        you can iterate over all results in a reasonably efficient
        manner.

        :type query: :class:`boto.cloudsearch2.search.Query`
        :param query: A group of search criteria

        :rtype: generator
        :return: All docs matching query
        """
        page = 0
        num_pages_needed = 0
        while page <= num_pages_needed:
            results = self(query)
            num_pages_needed = results.num_pages_needed
            for doc in results:
                yield doc
            query.start += query.real_size
            page += 1

    def get_num_hits(self, query):
        """Return the total number of hits for query

        :type query: :class:`boto.cloudsearch2.search.Query`
        :param query: a group of search criteria

        :rtype: int
        :return: Total number of hits for query
        """
        query.update_size(1)
        return self(query).hits
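
A minimal sketch of driving the class above; it assumes a boto.cloudsearch2 Domain object named domain has already been obtained elsewhere.

# A sketch, assuming `domain` is an existing boto.cloudsearch2 Domain object.
search_service = SearchConnection(domain=domain)
query = search_service.build_query(q='Tim', parser='simple', size=50)
for doc in search_service.get_all_hits(query):
    print(doc['id'])   # each hit is a dict with 'id' and 'fields'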
Code example #8
File: document.py  Project: 10sr/hue
class DocumentServiceConnection(object):
    """
    A CloudSearch document service.

    The DocumentServiceConnection is used to add, remove and update documents in
    CloudSearch. Commands are uploaded to CloudSearch in SDF (Search Document
    Format).

    To generate an appropriate SDF, use :func:`add` to add or update documents,
    as well as :func:`delete` to remove documents.

    Once the set of documents is ready to be indexed, use :func:`commit` to send
    the commands to CloudSearch.

    If there are a lot of documents to index, it may be preferable to split the
    generation of SDF data and the actual uploading into CloudSearch. Retrieve
    the current SDF with :func:`get_sdf`. If this file is then uploaded into S3,
    it can be retrieved back afterwards for upload into CloudSearch using
    :func:`add_sdf_from_s3`.

    The SDF is not cleared after a :func:`commit`. If you wish to continue
    using the DocumentServiceConnection for another batch upload of commands,
    you will need to :func:`clear_sdf` first to stop the previous batch of
    commands from being uploaded again.

    """

    def __init__(self, domain=None, endpoint=None):
        self.domain = domain
        self.endpoint = endpoint
        if not self.endpoint:
            self.endpoint = domain.doc_service_endpoint
        self.documents_batch = []
        self._sdf = None

        # Copy proxy settings from connection and check if request should be signed
        self.proxy = {}
        self.sign_request = False
        if self.domain and self.domain.layer1:
            if self.domain.layer1.use_proxy:
                self.proxy = {'http': self.domain.layer1.get_proxy_url_with_auth()}

            self.sign_request = getattr(self.domain.layer1, 'sign_request', False)

            if self.sign_request:
                # Create a domain connection to send signed requests
                layer1 = self.domain.layer1
                self.domain_connection = CloudSearchDomainConnection(
                    host=self.endpoint,
                    aws_access_key_id=layer1.aws_access_key_id,
                    aws_secret_access_key=layer1.aws_secret_access_key,
                    region=layer1.region,
                    provider=layer1.provider
                )

    def add(self, _id, fields):
        """
        Add a document to be processed by the DocumentService

        The document will not actually be added until :func:`commit` is called

        :type _id: string
        :param _id: A unique ID used to refer to this document.

        :type fields: dict
        :param fields: A dictionary of key-value pairs to be uploaded.
        """

        d = {'type': 'add', 'id': _id, 'fields': fields}
        self.documents_batch.append(d)

    def delete(self, _id):
        """
        Schedule a document to be removed from the CloudSearch service

        The document will not actually be scheduled for removal until
        :func:`commit` is called

        :type _id: string
        :param _id: The unique ID of this document.
        """

        d = {'type': 'delete', 'id': _id}
        self.documents_batch.append(d)

    def get_sdf(self):
        """
        Generate the working set of documents in Search Data Format (SDF)

        :rtype: string
        :returns: JSON-formatted string of the documents in SDF
        """

        return self._sdf if self._sdf else json.dumps(self.documents_batch)

    def clear_sdf(self):
        """
        Clear the working documents from this DocumentServiceConnection

        This should be used after :func:`commit` if the connection will be
        reused for another set of documents.
        """

        self._sdf = None
        self.documents_batch = []

    def add_sdf_from_s3(self, key_obj):
        """
        Load an SDF from S3

        Using this method will result in documents added through
        :func:`add` and :func:`delete` being ignored.

        :type key_obj: :class:`boto.s3.key.Key`
        :param key_obj: An S3 key which contains an SDF
        """
        # @todo: (lucas) would be nice if this could just take an s3://uri...

        self._sdf = key_obj.get_contents_as_string()

    def _commit_with_auth(self, sdf, api_version):
        return self.domain_connection.upload_documents(sdf, 'application/json')

    def _commit_without_auth(self, sdf, api_version):
        url = "http://%s/%s/documents/batch" % (self.endpoint, api_version)

        # Keep-alive is automatic in a post-1.0 requests world.
        session = requests.Session()
        session.proxies = self.proxy
        adapter = requests.adapters.HTTPAdapter(
            pool_connections=20,
            pool_maxsize=50,
            max_retries=5
        )
        session.mount('http://', adapter)
        session.mount('https://', adapter)

        resp = session.post(url, data=sdf, headers={'Content-Type': 'application/json'})
        return resp

    def commit(self):
        """
        Actually send an SDF to CloudSearch for processing

        If an SDF file has been explicitly loaded it will be used. Otherwise,
        documents added through :func:`add` and :func:`delete` will be used.

        :rtype: :class:`CommitResponse`
        :returns: A summary of documents added and deleted
        """

        sdf = self.get_sdf()

        if ': null' in sdf:
            boto.log.error('null value in sdf detected. This will probably '
                           'raise 500 error.')
            index = sdf.index(': null')
            boto.log.error(sdf[index - 100:index + 100])

        api_version = '2013-01-01'
        if self.domain and self.domain.layer1:
            api_version = self.domain.layer1.APIVersion

        if self.sign_request:
            r = self._commit_with_auth(sdf, api_version)
        else:
            r = self._commit_without_auth(sdf, api_version)

        return CommitResponse(r, self, sdf, signed_request=self.sign_request)
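
A minimal sketch of the add/delete/commit workflow described in the class docstring above, again assuming an existing boto.cloudsearch2 Domain object named domain.

# A sketch, assuming `domain` is an existing boto.cloudsearch2 Domain object.
doc_service = DocumentServiceConnection(domain=domain)
doc_service.add('doc-1', {'headline': 'Example headline', 'author': 'Tim'})
doc_service.delete('doc-2')
response = doc_service.commit()   # uploads the batch as SDF
print(response.adds, response.deletes)
doc_service.clear_sdf()           # reset before building the next batch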
Code example #9
    def test_no_host_provided(self):
        # A host must be provided or an error is thrown.
        with self.assertRaises(ValueError):
            CloudSearchDomainConnection(
                aws_access_key_id='aws_access_key_id',
                aws_secret_access_key='aws_secret_access_key')
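
The test above asserts that omitting host raises a ValueError; for contrast, a passing construction with a placeholder endpoint would look like this sketch.

# A sketch; the host value is a placeholder document-service endpoint.
conn = CloudSearchDomainConnection(
    aws_access_key_id='aws_access_key_id',
    aws_secret_access_key='aws_secret_access_key',
    host='doc-mydomain-abc123.ap-southeast-2.cloudsearch.amazonaws.com')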
Code example #10
class DocumentServiceConnection(object):
    """
    A CloudSearch document service.

    The DocumentServiceConnection is used to add, remove and update documents in
    CloudSearch. Commands are uploaded to CloudSearch in SDF (Search Document
    Format).

    To generate an appropriate SDF, use :func:`add` to add or update documents,
    as well as :func:`delete` to remove documents.

    Once the set of documents is ready to be indexed, use :func:`commit` to send
    the commands to CloudSearch.

    If there are a lot of documents to index, it may be preferable to split the
    generation of SDF data and the actual uploading into CloudSearch. Retrieve
    the current SDF with :func:`get_sdf`. If this file is then uploaded into S3,
    it can be retrieved back afterwards for upload into CloudSearch using
    :func:`add_sdf_from_s3`.

    The SDF is not cleared after a :func:`commit`. If you wish to continue
    using the DocumentServiceConnection for another batch upload of commands,
    you will need to :func:`clear_sdf` first to stop the previous batch of
    commands from being uploaded again.

    """
    def __init__(self, domain=None, endpoint=None):
        self.domain = domain
        self.endpoint = endpoint
        if not self.endpoint:
            self.endpoint = domain.doc_service_endpoint
        self.documents_batch = []
        self._sdf = None

        # Copy proxy settings from connection and check if request should be signed
        self.proxy = {}
        self.sign_request = False
        if self.domain and self.domain.layer1:
            if self.domain.layer1.use_proxy:
                self.proxy = {
                    'http': self.domain.layer1.get_proxy_url_with_auth()
                }

            self.sign_request = getattr(self.domain.layer1, 'sign_request',
                                        False)

            if self.sign_request:
                # Create a domain connection to send signed requests
                layer1 = self.domain.layer1
                self.domain_connection = CloudSearchDomainConnection(
                    host=self.endpoint,
                    aws_access_key_id=layer1.aws_access_key_id,
                    aws_secret_access_key=layer1.aws_secret_access_key,
                    region=layer1.region,
                    provider=layer1.provider)

    def add(self, _id, fields):
        """
        Add a document to be processed by the DocumentService

        The document will not actually be added until :func:`commit` is called

        :type _id: string
        :param _id: A unique ID used to refer to this document.

        :type fields: dict
        :param fields: A dictionary of key-value pairs to be uploaded.
        """

        d = {'type': 'add', 'id': _id, 'fields': fields}
        self.documents_batch.append(d)

    def delete(self, _id):
        """
        Schedule a document to be removed from the CloudSearch service

        The document will not actually be scheduled for removal until
        :func:`commit` is called

        :type _id: string
        :param _id: The unique ID of this document.
        """

        d = {'type': 'delete', 'id': _id}
        self.documents_batch.append(d)

    def get_sdf(self):
        """
        Generate the working set of documents in Search Data Format (SDF)

        :rtype: string
        :returns: JSON-formatted string of the documents in SDF
        """

        return self._sdf if self._sdf else json.dumps(self.documents_batch)

    def clear_sdf(self):
        """
        Clear the working documents from this DocumentServiceConnection

        This should be used after :func:`commit` if the connection will be
        reused for another set of documents.
        """

        self._sdf = None
        self.documents_batch = []

    def add_sdf_from_s3(self, key_obj):
        """
        Load an SDF from S3

        Using this method will result in documents added through
        :func:`add` and :func:`delete` being ignored.

        :type key_obj: :class:`boto.s3.key.Key`
        :param key_obj: An S3 key which contains an SDF
        """
        # @todo: (lucas) would be nice if this could just take an s3://uri...

        self._sdf = key_obj.get_contents_as_string()

    def _commit_with_auth(self, sdf, api_version):
        return self.domain_connection.upload_documents(sdf, 'application/json')

    def _commit_without_auth(self, sdf, api_version):
        url = "http://%s/%s/documents/batch" % (self.endpoint, api_version)

        # Keep-alive is automatic in a post-1.0 requests world.
        session = requests.Session()
        session.proxies = self.proxy
        adapter = requests.adapters.HTTPAdapter(pool_connections=20,
                                                pool_maxsize=50,
                                                max_retries=5)
        session.mount('http://', adapter)
        session.mount('https://', adapter)

        resp = session.post(url,
                            data=sdf,
                            headers={'Content-Type': 'application/json'})
        return resp

    def commit(self):
        """
        Actually send an SDF to CloudSearch for processing

        If an SDF file has been explicitly loaded it will be used. Otherwise,
        documents added through :func:`add` and :func:`delete` will be used.

        :rtype: :class:`CommitResponse`
        :returns: A summary of documents added and deleted
        """

        sdf = self.get_sdf()

        if ': null' in sdf:
            boto.log.error('null value in sdf detected. This will probably '
                           'raise 500 error.')
            index = sdf.index(': null')
            boto.log.error(sdf[index - 100:index + 100])

        api_version = '2013-01-01'
        if self.domain and self.domain.layer1:
            api_version = self.domain.layer1.APIVersion

        if self.sign_request:
            r = self._commit_with_auth(sdf, api_version)
        else:
            r = self._commit_without_auth(sdf, api_version)

        return CommitResponse(r, self, sdf, signed_request=self.sign_request)
Code example #11
File: search.py  Project: 10sr/hue
class SearchConnection(object):

    def __init__(self, domain=None, endpoint=None):
        self.domain = domain
        self.endpoint = endpoint
        self.session = requests.Session()

        # Endpoint needs to be set before initializing CloudSearchDomainConnection
        if not endpoint:
            self.endpoint = domain.search_service_endpoint

        # Copy proxy settings from connection and check if request should be signed
        self.sign_request = False
        if self.domain and self.domain.layer1:
            if self.domain.layer1.use_proxy:
                self.session.proxies['http'] = self.domain.layer1.get_proxy_url_with_auth()

            self.sign_request = getattr(self.domain.layer1, 'sign_request', False)

            if self.sign_request:
                layer1 = self.domain.layer1
                self.domain_connection = CloudSearchDomainConnection(
                    host=self.endpoint,
                    aws_access_key_id=layer1.aws_access_key_id,
                    aws_secret_access_key=layer1.aws_secret_access_key,
                    region=layer1.region,
                    provider=layer1.provider
                )

    def build_query(self, q=None, parser=None, fq=None, rank=None, return_fields=None,
                    size=10, start=0, facet=None, highlight=None, sort=None,
                    partial=None, options=None):
        return Query(q=q, parser=parser, fq=fq, expr=rank, return_fields=return_fields,
                     size=size, start=start, facet=facet, highlight=highlight,
                     sort=sort, partial=partial, options=options)

    def search(self, q=None, parser=None, fq=None, rank=None, return_fields=None,
               size=10, start=0, facet=None, highlight=None, sort=None, partial=None,
               options=None):
        """
        Send a query to CloudSearch

        Each search query should use at least the q argument to specify
        the search string. The other options are used to specify the
        criteria of the search.

        :type q: string
        :param q: A string to search the default search fields for.

        :type parser: string
        :param parser: The parser to use. 'simple', 'structured', 'lucene', 'dismax'

        :type fq: string
        :param fq: The filter query to use.

        :type sort: List of strings
        :param sort: A list of fields or rank expressions used to order the
            search results. Order is handled by adding 'desc' or 'asc' after the field name.
            ``['year desc', 'author asc']``

        :type return_fields: List of strings
        :param return_fields: A list of fields which should be returned by the
            search. If this field is not specified, only IDs will be returned.
            ``['headline']``

        :type size: int
        :param size: Number of search results to return

        :type start: int
        :param start: Offset of the first search result to return (can be used
            for paging)

        :type facet: dict
        :param facet: Dictionary of fields for which facets should be returned.
            The facet value is a string of JSON options
            ``{'year': '{sort:"bucket", size:3}', 'genres': '{buckets:["Action","Adventure","Sci-Fi"]}'}``

        :type highlight: dict
        :param highlight: Dictionary of fields for which highlights should be returned.
            The highlight value is a string of JSON options
            ``{'genres': '{format:'text',max_phrases:2,pre_tag:'<b>',post_tag:'</b>'}'}``

        :type partial: bool
        :param partial: Should partial results from a partitioned service be returned
            if one or more index partitions are unreachable?

        :type options: str
        :param options: Options for the query parser specified in *parser*.
            Specified as a string in JSON format.
            ``{fields: ['title^5', 'description']}``

        :rtype: :class:`boto.cloudsearch2.search.SearchResults`
        :return: Returns the results of this search

        The following examples all assume we have indexed a set of documents
        with fields: *author*, *date*, *headline*

        A simple search will look for documents whose default text search
        fields will contain the search word exactly:

        >>> search(q='Tim') # Return documents with the word Tim in them (but not Timothy)

        A simple search with more keywords will return documents whose default
        text search fields contain the search strings together or separately.

        >>> search(q='Tim apple') # Will match "tim" and "apple"

        More complex searches require the boolean search operator.

        Wildcard searches can be used to search for any words that start with
        the search string.

        >>> search(q="'Tim*'") # Return documents with words like Tim or Timothy

        Search terms can also be combined. Allowed operators are "and", "or",
        "not", "field", "optional", "token", "phrase", or "filter"

        >>> search(q="(and 'Tim' (field author 'John Smith'))", parser='structured')

        Facets allow you to show classification information about the search
        results. For example, you can retrieve the authors who have written
        about Tim with a max of 3

        >>> search(q='Tim', facet={'Author': '{sort:"bucket", size:3}'})
        """

        query = self.build_query(q=q, parser=parser, fq=fq, rank=rank,
                                 return_fields=return_fields,
                                 size=size, start=start, facet=facet,
                                 highlight=highlight, sort=sort,
                                 partial=partial, options=options)
        return self(query)

    def _search_with_auth(self, params):
        return self.domain_connection.search(params.pop("q", ""), **params)

    def _search_without_auth(self, params, api_version):
        url = "http://%s/%s/search" % (self.endpoint, api_version)
        resp = self.session.get(url, params=params)

        return {'body': resp.content.decode('utf-8'), 'status_code': resp.status_code}

    def __call__(self, query):
        """Make a call to CloudSearch

        :type query: :class:`boto.cloudsearch2.search.Query`
        :param query: A group of search criteria

        :rtype: :class:`boto.cloudsearch2.search.SearchResults`
        :return: search results
        """
        api_version = '2013-01-01'
        if self.domain and self.domain.layer1:
            api_version = self.domain.layer1.APIVersion

        if self.sign_request:
            data = self._search_with_auth(query.to_domain_connection_params())
        else:
            r = self._search_without_auth(query.to_params(), api_version)

            _body = r['body']
            _status_code = r['status_code']

            try:
                data = json.loads(_body)
            except ValueError:
                if _status_code == 403:
                    msg = ''
                    import re
                    g = re.search('<html><body><h1>403 Forbidden</h1>([^<]+)<', _body)
                    try:
                        msg = ': %s' % (g.groups()[0].strip())
                    except AttributeError:
                        pass
                    raise SearchServiceException('Authentication error from Amazon%s' % msg)
                raise SearchServiceException("Got non-json response from Amazon. %s" % _body, query)

        if 'messages' in data and 'error' in data:
            for m in data['messages']:
                if m['severity'] == 'fatal':
                    raise SearchServiceException("Error processing search %s "
                        "=> %s" % (params, m['message']), query)
        elif 'error' in data:
            raise SearchServiceException("Unknown error processing search %s"
                % json.dumps(data), query)

        data['query'] = query
        data['search_service'] = self

        return SearchResults(**data)

    def get_all_paged(self, query, per_page):
        """Get a generator to iterate over all pages of search results

        :type query: :class:`boto.cloudsearch2.search.Query`
        :param query: A group of search criteria

        :type per_page: int
        :param per_page: Number of docs in each :class:`boto.cloudsearch2.search.SearchResults` object.

        :rtype: generator
        :return: Generator containing :class:`boto.cloudsearch2.search.SearchResults`
        """
        query.update_size(per_page)
        page = 0
        num_pages_needed = 0
        while page <= num_pages_needed:
            results = self(query)
            num_pages_needed = results.num_pages_needed
            yield results
            query.start += query.real_size
            page += 1

    def get_all_hits(self, query):
        """Get a generator to iterate over all search results

        Transparently handles the results paging from Cloudsearch
        search results so even if you have many thousands of results
        you can iterate over all results in a reasonably efficient
        manner.

        :type query: :class:`boto.cloudsearch2.search.Query`
        :param query: A group of search criteria

        :rtype: generator
        :return: All docs matching query
        """
        page = 0
        num_pages_needed = 0
        while page <= num_pages_needed:
            results = self(query)
            num_pages_needed = results.num_pages_needed
            for doc in results:
                yield doc
            query.start += query.real_size
            page += 1

    def get_num_hits(self, query):
        """Return the total number of hits for query

        :type query: :class:`boto.cloudsearch2.search.Query`
        :param query: a group of search criteria

        :rtype: int
        :return: Total number of hits for query
        """
        query.update_size(1)
        return self(query).hits