Example #1
    def _lr_post_command(self, data):
        retries = self.MAX_RETRY_COUNT

        if "name" in data and data["name"] not in self.session_data[
                "supported_commands"]:
            raise ApiError("Command {0} not supported by this device".format(
                data["name"]))

        while retries:
            try:
                data["session_id"] = self.session_id
                resp = self._cb.post_object(
                    "{cblr_base}/sessions/{0}/commands".format(
                        self.session_id, cblr_base=self.cblr_base), data)
            except ObjectNotFoundError as e:
                try:
                    error_message = json.loads(e.message)
                    if error_message["error_code"] == "NOT_FOUND":
                        self.session_id, self.session_data = \
                            self._cblr_manager._get_or_create_session(self.device_id)
                        retries -= 1
                        continue
                except Exception:
                    pass
                raise ApiError("Received 404 error from server: {0}".format(
                    e.message))
            else:
                return resp

        raise TimeoutError(
            message="Command {0} failed after {1} retries".format(
                data["name"], self.MAX_RETRY_COUNT))
    def _search(self):
        """Execute the query until 'processed_segments' == 'total_segments'"""
        args = self._get_query_parameters()
        self._validate(args)
        still_querying, last_processed_segments, retry_counter = (True, -1, 0)
        while still_querying:
            url, code, result = self._submit()

            self._total_results = result.get("num_available", 0)
            self._total_segments = result.get("total_segments", 0)
            self._processed_segments = result.get("processed_segments", 0)
            self._count_valid = True
            if self._processed_segments != self._total_segments:
                retry_counter = 0 if self._processed_segments > last_processed_segments else retry_counter + 1
                last_processed_segments = max(last_processed_segments,
                                              self._processed_segments)
                if retry_counter == MAX_EVENT_SEARCH_RETRIES:
                    raise TimeoutError(
                        url, code,
                        "excessive number of retries in event facet query")
                time.sleep(1 + retry_counter / 10)
                continue  # loop until we get all segments back

            # processed_segments == total_segments, end the search
            return self._doc_class(self._cb,
                                   model_unique_id=self._query_token,
                                   initial_data=result)
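
The loop above keeps re-submitting the facet query until the backend reports that every segment has been processed, resetting the retry counter whenever progress is made and backing off slightly between stalled attempts. A stand-alone sketch of that polling idea, where fetch_status and max_retries are illustrative stand-ins for the submit-and-read step and MAX_EVENT_SEARCH_RETRIES:

import time


def wait_for_all_segments(fetch_status, max_retries=10):
    last_processed, retry_counter = -1, 0
    while True:
        processed, total = fetch_status()    # e.g. (processed_segments, total_segments)
        if processed == total:
            return processed                 # every segment has been processed
        retry_counter = 0 if processed > last_processed else retry_counter + 1
        last_processed = max(last_processed, processed)
        if retry_counter == max_retries:
            raise TimeoutError("excessive number of retries while polling segments")
        time.sleep(1 + retry_counter / 10)   # back off slightly on stalled polls
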
Example #3
    def _search(self, start=0, rows=0):
        if not self._query_token:
            self._submit()

        while self._still_querying():
            time.sleep(.5)

        if self._timed_out:
            raise TimeoutError(
                message="user-specified timeout exceeded while waiting for results")

        log.debug("Pulling results, timed_out={}".format(self._timed_out))

        current = start
        rows_fetched = 0
        still_fetching = True
        result_url_template = "/api/investigate/v2/orgs/{}/processes/search_jobs/{}/results".format(
            self._cb.credentials.org_key, self._query_token)
        query_parameters = {}
        while still_fetching:
            result_url = '{}?start={}&rows={}'.format(
                result_url_template,
                current,
                10  # fetch in batches of 10 rows to reduce the number of API calls
            )

            result = self._cb.get_object(result_url,
                                         query_parameters=query_parameters)

            self._total_results = result.get('num_available', 0)
            self._count_valid = True

            results = result.get('results', [])

            for item in results:
                yield item
                current += 1
                rows_fetched += 1

                if rows and rows_fetched >= rows:
                    still_fetching = False
                    break

            if current >= self._total_results:
                still_fetching = False

            if not results:
                # guard against looping forever if the server over-reports num_available
                still_fetching = False

            log.debug("current: {}, total_results: {}".format(
                current, self._total_results))
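
Stripped of the SDK plumbing, the loop above is a standard start/rows pagination over a results endpoint: request a small batch, yield each item, and stop once every available result has been returned. A hedged stand-alone sketch, where get_page is an illustrative stand-in for the get_object call:

def paged_results(get_page, batch_size=10):
    current, total = 0, None
    while total is None or current < total:
        page = get_page(start=current, rows=batch_size)
        total = page.get("num_available", 0)
        results = page.get("results", [])
        if not results:
            break                            # nothing left even though total said otherwise
        for item in results:
            yield item
            current += 1
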
Example #4
    def _count(self):
        if self._count_valid:
            return self._total_results

        while self._still_querying():
            time.sleep(.5)

        if self._timed_out:
            raise TimeoutError(
                message="user-specified timeout exceeded while waiting for results")

        result_url = "/api/investigate/v2/orgs/{}/processes/search_jobs/{}/results".format(
            self._cb.credentials.org_key,
            self._query_token,
        )
        result = self._cb.get_object(result_url)

        self._total_results = result.get('num_available', 0)
        self._count_valid = True

        return self._total_results

    def _get_or_create_session(self, device_id):
        session_id = self._create_session(device_id)

        try:
            res = poll_status(self._cb,
                              "{cblr_base}/session/{0}".format(
                                  session_id, cblr_base=self.cblr_base),
                              desired_status="ACTIVE",
                              delay=self._init_poll_delay,
                              timeout=self._init_poll_timeout)
        except Exception:
            # "close" the session, otherwise it will stay in a pending state
            self._close_session(session_id)

            # the Cb server will return a 404 if we don't establish a session in time, so convert this to a "timeout"
            raise TimeoutError(
                uri="{cblr_base}/session/{0}".format(session_id,
                                                     cblr_base=self.cblr_base),
                message="Could not establish session with device {0}".format(
                    device_id),
                error_code=404)
        else:
            return session_id, res
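
The comments above capture the reasoning: if the session never reaches ACTIVE, it is closed so it does not linger in a pending state, and the server's eventual 404 is surfaced as a timeout instead. A compact sketch of that cleanup pattern, with create_session, wait_until_active and close_session as illustrative stand-ins:

def establish_session(create_session, wait_until_active, close_session):
    session_id = create_session()
    try:
        session_data = wait_until_active(session_id)   # may raise on timeout or 404
    except Exception:
        close_session(session_id)                      # do not leave a pending session behind
        raise TimeoutError("could not establish session {0}".format(session_id))
    return session_id, session_data
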
def poll_status(cb, url, desired_status="complete", timeout=None, delay=None):
    """
    Poll the status of a Live Response query.

    Args:
        cb (BaseAPI): The CBC SDK object reference.
        url (str): The URL to poll.
        desired_status (str): The status we're looking for.
        timeout (int): The timeout value in seconds.
        delay (float): The delay between attempts in seconds.

    Returns:
        object: The result of the Live Response query that has the desired status.

    Raises:
        LiveResponseError: If an error response was encountered.
    """
    start_time = time.time()
    status = None

    if not timeout:
        timeout = 120
    if not delay:
        delay = 0.5

    while status != desired_status and time.time() - start_time < timeout:
        res = cb.get_object(url)
        log.error(f"url: {url} -> status: {res['status']}")
        if res["status"] == desired_status:
            log.debug(json.dumps(res))
            return res
        elif res["status"] == "error":
            raise LiveResponseError(res)
        else:
            time.sleep(delay)

    raise TimeoutError(uri=url, message="timeout polling for Live Response")
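
The helper above can be exercised directly. A hedged usage sketch, assuming cb, cblr_base and session_id already exist in the caller's scope (the URL shape is illustrative, not taken from the code above):

# Wait up to two minutes for a Live Response session to report ACTIVE,
# checking every half second.
session_data = poll_status(cb,
                           "{0}/sessions/{1}".format(cblr_base, session_id),
                           desired_status="ACTIVE",
                           timeout=120,
                           delay=0.5)
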
    def _search(self, start=0, rows=0):
        """
        Execute the query, iterating over results 500 rows at a time.

        Args:
           start (int): What index to begin retrieving results from.
                        If `start` is not specified, the default of 0 will be used.
           rows (int): Total number of results to be retrieved.
                       If `rows` is not specified, the query will continue until all available results have
                       been retrieved, getting results in batches of 500.
        """
        # iterate over the total result set, self._batch_size rows at a time
        args = self._get_query_parameters()
        self._validate(args)

        if start != 0:
            args['start'] = start
        args['rows'] = self._batch_size

        current = start
        numrows = 0

        still_querying, last_processed_segments, retry_counter = (True, -1, 0)

        while still_querying:
            url = self._doc_class.urlobject.format(
                self._cb.credentials.org_key, args["process_guid"])
            resp = self._cb.post_object(url, body=args)
            result = resp.json()

            self._total_results = result.get("num_available", 0)
            self._total_segments = result.get("total_segments", 0)
            self._processed_segments = result.get("processed_segments", 0)
            self._count_valid = True
            if self._processed_segments != self._total_segments:
                retry_counter = 0 if self._processed_segments > last_processed_segments else retry_counter + 1
                last_processed_segments = max(last_processed_segments,
                                              self._processed_segments)
                if retry_counter == MAX_EVENT_SEARCH_RETRIES:
                    raise TimeoutError(
                        url, resp.status_code,
                        "excessive number of retries in event query")
                time.sleep(1 + retry_counter / 10)
                continue  # loop until we get all segments back

            results = result.get('results', [])

            for item in results:
                yield item
                current += 1

                numrows += 1
                if rows and numrows == rows:
                    still_querying = False
                    break

            args['start'] = current

            if current >= self._total_results:
                break
            if not results:
                log.debug(
                    "server reported total_results overestimated the number of results for this query by {0}"
                    .format(self._total_results - current))
                log.debug(
                    "resetting total_results for this query to {0}".format(
                        current))
                self._total_results = current
                break
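
Like the earlier examples, this _search is a generator, so callers can cap retrieval without downloading the full result set. A hedged usage sketch, where query is an assumed instance of the surrounding query class:

# Retrieve at most 50 events starting at offset 0; the generator stops on its
# own once `rows` results have been yielded or the results are exhausted.
events = list(query._search(start=0, rows=50))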