Code example #1
 def http_get(self, url, ignore_errors=False):
     """
     Perform HTTP/HTTPS get and return result
     :param url: Request URL
     :param ignore_errors: Ignore response errors and return header and body
     :returns: Result as a string, or None in case of errors
     """
     self.logger.debug("HTTP GET %s", url)
     code, header, body = yield fetch(
         url,
         request_timeout=config.activator.http_request_timeout,
         follow_redirects=True,
         validate_cert=config.activator.http_validate_cert,
         eof_mark="</html>",
     )
     if 200 <= code <= 299:
         raise tornado.gen.Return(body)
     elif ignore_errors:
         metrics["error", ("type", "http_error_%s" % code)] += 1
         self.logger.debug("HTTP GET %s failed: %s %s", url, code, body)
         raise tornado.gen.Return(str(header) + str(body))
     else:
         metrics["error", ("type", "http_error_%s" % code)] += 1
         self.logger.debug("HTTP GET %s failed: %s %s", url, code, body)
         raise tornado.gen.Return(None)
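A minimal usage sketch of the coroutine above (hypothetical caller; the BaseScript class and the URL are assumptions, not part of the original):

import tornado.gen

class MyScript(BaseScript):  # hypothetical base class providing http_get and self.logger
    @tornado.gen.coroutine
    def get_index(self):
        # Yield the coroutine; the response body is returned, or None on error
        body = yield self.http_get("http://192.0.2.1/", ignore_errors=False)
        if body is None:
            self.logger.error("GET failed")
        raise tornado.gen.Return(body)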
Code example #2
 def fetch(self, url, method="GET", headers=None, body=None):
     url = "%s%s" % (self.base_url, url)
     code, headers, body = yield fetch(url=url,
                                       method=method,
                                       headers=headers,
                                       body=body,
                                       io_loop=self.io_loop)
     raise tornado.gen.Return((code, headers, body))
Code example #3
File: pub.py  Project: gabrielat/noc
def mpub(topic, messages, dcs=None, io_loop=None, retries=None):
    """
    Asynchronously publish message to NSQ topic

    :param topic: NSQ topic
    :param messages: List of strings containing messages
    :param dcs: DCS instance
    :param io_loop: IOLoop instance
    :param retries: Error retries. config.nsqd.pub_retries by default
    :return: None
    :raises NSQPubError: On publish error
    """
    if not messages:
        raise tornado.gen.Return()
    if not dcs:
        # No global DCS, instantiate one
        dcs = get_dcs(ioloop=io_loop)
    # Build body
    msg = mpub_encode(messages)
    # Setup resolver
    services = nsqd_http_service_param.services
    num_services = len(services)
    if num_services > 1:
        s_index = random.randint(0, num_services - 1)
    else:
        s_index = 0
    # Post message
    retries = retries or config.nsqd.pub_retries
    code = 200
    body = None
    metrics["nsq_mpub", ("topic", topic)] += 1
    while retries > 0:
        # Get actual nsqd service's address and port
        si = services[s_index]
        if not nsqd_http_service_param.is_static(si):
            si = yield dcs.resolve(si, near=True)
        # Send message
        code, _, body = yield fetch(
            "http://%s/mpub?topic=%s&binary=true" % (si, topic),
            method="POST",
            body=msg,
            io_loop=io_loop,
            connect_timeout=config.nsqd.connect_timeout,
            request_timeout=config.nsqd.request_timeout,
        )
        if code == 200:
            break
        metrics["nsq_mpub_error", ("topic", topic)] += 1
        logger.error("Failed to pub to topic '%s': %s (Code=%d)", topic, body,
                     code)
        retries -= 1
        if retries > 0:
            yield tornado.gen.sleep(config.nsqd.pub_retry_delay)
            s_index = (s_index + 1) % num_services
    if code != 200:
        logger.error("Failed to pub to topic '%s'. Giving up", topic)
        metrics["nsq_mpub_fail", ("topic", topic)] += 1
        raise NSQPubError("NSQ Pub error: code=%s message=%s" % (code, body))
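A minimal usage sketch of mpub (hypothetical caller; the topic name and message strings are assumptions):

import logging
import tornado.gen
import tornado.ioloop

logger = logging.getLogger(__name__)

@tornado.gen.coroutine
def publish(events):
    # events: list of message strings, sent as a single MPUB request
    try:
        yield mpub("events", events)  # "events" topic is hypothetical
    except NSQPubError as e:
        logger.error("Failed to publish: %s", e)

tornado.ioloop.IOLoop.current().run_sync(lambda: publish(["msg-1", "msg-2"]))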
Code example #4
 def query(self, change_id=None, filters=None, block=False, limit=None, io_loop=None):
     """
     Query datastream and dispatch received items to on_change/on_delete
     :param change_id: Continue from this change id (None to start from the beginning)
     :param filters: Optional list of filter expressions, passed as *filter* query parameters
     :param block: Block and wait for new changes when the stream is exhausted
     :param limit: Limit amount of records per request
     :param io_loop: IOLoop instance
     :return: None
     """
     # Basic URL and query
     base_url = "http://datastream/api/datastream/%s" % self.name
     base_qs = []
     if filters:
         base_qs += ["filter=%s" % x for x in filters]
     if block:
         base_qs += ["block=1"]
     if limit:
         base_qs += ["limit=%d" % limit]
     req_headers = {"X-NOC-API-Access": "datastream:%s" % self.name}
     # Continue until finish
     while True:
         # Build URL
         # *datastream* host name will be resolved with *resolve* method
         qs = base_qs[:]
         if change_id:
             qs += ["from=%s" % change_id]
         if qs:
             url = "%s?%s" % (base_url, "&".join(qs))
         else:
             url = base_url
         # Get data
         logger.debug("Request: %s", url)
         code, headers, data = yield fetch(
             url, io_loop=io_loop, resolver=self.resolve, headers=req_headers
         )
         logger.debug("Response: %s %s", code, headers)
         if code == ERR_TIMEOUT or code == ERR_READ_TIMEOUT:
             continue  # Retry on timeout
         elif code != 200:
             logger.info("Invalid response code: %s", code)
             raise NOCError(code=ERR_DS_BAD_CODE, msg="Invalid response code %s" % code)
         # Parse response
         try:
             data = ujson.loads(data)
         except ValueError as e:
             logger.info("Cannot parse response: %s", e)
             raise NOCError(code=ERR_DS_PARSE_ERROR, msg="Cannot parse response: %s" % e)
         # Process result
         for item in data:
             if "$deleted" in item:
                 self.on_delete(item)
             else:
                 self.on_change(item)
         # Continue from last change
         if "X-NOC-DataStream-Last-Change" in headers:
             change_id = headers["X-NOC-DataStream-Last-Change"]
         elif not block:
             break  # Empty batch, stop if non-blocking mode
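A minimal usage sketch of the query loop above (hypothetical subclass; the base class name and datastream name are assumptions, relying only on the on_change/on_delete hooks used in the body):

class ManagedObjectClient(DataStreamClient):  # base class name assumed
    name = "managedobject"  # hypothetical datastream name

    def on_change(self, item):
        print("changed:", item["id"])

    def on_delete(self, item):
        print("deleted:", item["id"])

# Then, inside a coroutine: yield client.query(block=True)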
Code example #5
    def query_lookupd(self):
        logger.info("query_lookupd")
        endpoint = self.lookupd_http_addresses[self.lookupd_query_index]
        self.lookupd_query_index = (self.lookupd_query_index + 1) % len(
            self.lookupd_http_addresses)

        # urlsplit() is faulty if scheme not present
        if "://" not in endpoint:
            endpoint = "http://" + endpoint

        scheme, netloc, path, query, fragment = urlsplit(endpoint)

        if not path or path == "/":
            path = "/lookup"

        params = parse_qs(query)
        params["topic"] = self.topic
        query = urlencode(_utf8_params(params), doseq=1)
        lookupd_url = urlunsplit((scheme, netloc, path, query, fragment))

        code, headers, body = yield fetch(
            lookupd_url,
            headers={"Accept": "application/vnd.nsq; version=1.0"},
            connect_timeout=self.lookupd_connect_timeout,
            request_timeout=self.lookupd_request_timeout,
        )

        if not (200 <= code <= 299):
            metrics["error",
                    ("type", "nsqlookupd_query_error_code %s" % code)] += 1
            logger.warning("[%s] lookupd %s query error: %s %s", self.name,
                           lookupd_url, code, body)
            return
        # Decode response
        try:
            lookup_data = ujson.loads(body)
        except ValueError as e:
            metrics["error", ("type", "nsqlookupd_invalid_json")] += 1
            logger.warning("[%s] lookupd %s failed to parse JSON: %s",
                           self.name, lookupd_url, e)
            return

        if "data" in lookup_data:
            # Pre 1.0.0-compat
            producers = lookup_data["data"]["producers"]
        else:
            # 1.0.0-compat
            producers = lookup_data["producers"]
        for producer in producers:
            address = producer.get("broadcast_address",
                                   producer.get("address"))
            assert address
            self.connect_to_nsqd(address, producer["tcp_port"])
Code example #6
File: service.py  Project: skripkar/noc
 def flush_channel(self, channel):
     channel.start_flushing()
     n = channel.n
     data = channel.get_data()
     t0 = perf_counter()
     self.logger.debug("[%s] Sending %s records", channel.name, n)
     written = False
     suspended = False
     try:
         code, headers, body = yield fetch(
             channel.url,
             method="POST",
             body=data,
             user=config.clickhouse.rw_user,
             password=config.clickhouse.rw_password,
             content_encoding=config.clickhouse.encoding
         )
         if code == 200:
             self.logger.info(
                 "[%s] %d records sent in %.2fms",
                 channel.name,
                 n, (perf_counter() - t0) * 1000
             )
             metrics["records_written"] += n
             metrics["records_buffered"] -= n
             written = True
         elif code in self.CH_SUSPEND_ERRORS:
             self.logger.info(
                 "[%s] Timed out: %s",
                 channel.name, body
             )
             metrics["error", ("type", "records_spool_timeouts")] += 1
             suspended = True
         else:
             self.logger.info(
                 "[%s] Failed to write records: %s %s",
                 channel.name,
                 code, body
             )
             metrics["error", ("type", "records_spool_failed")] += 1
     except Exception as e:
         self.logger.error(
             "[%s] Failed to spool %d records due to unknown error: %s",
             channel.name, n, e
         )
     channel.stop_flushing()
     if not written:
         # Return data back to channel
         channel.feed(data)
         if suspended:
             self.suspend()
         else:
             self.requeue_channel(channel)
Code example #7
 def _request(self, callback, url, method="GET", body=None):
     code, headers, body = yield fetch(
         url,
         method=method,
         body=body,
         connect_timeout=CONSUL_CONNECT_TIMEOUT,
         request_timeout=CONSUL_REQUEST_TIMEOUT,
         validate_cert=self.verify)
     if code in ConsulRepearableCodes:
         raise consul.base.Timeout
     raise tornado.gen.Return(
         callback(
             consul.base.Response(code=code, headers=headers, body=body)))
Code example #8
 def make_call(url, body, limit=3):
     req_headers = {
         "X-NOC-Calling-Service": self._service.name,
         "Content-Type": "text/json",
     }
     sample = 1 if span_ctx and span_id else 0
     with Span(
             server=self._service_name,
             service=method,
             sample=sample,
             context=span_ctx,
             parent=span_id,
     ) as span:
         if sample:
             req_headers["X-NOC-Span-Ctx"] = span.span_context
             req_headers["X-NOC-Span"] = span.span_id
         code, headers, data = yield fetch(
             url,
             method="POST",
             headers=req_headers,
             body=body,
             connect_timeout=CONNECT_TIMEOUT,
             request_timeout=REQUEST_TIMEOUT,
         )
         # Process response
         if code == 200:
             raise tornado.gen.Return(data)
         elif code == 307:
             # Process redirect
             if not limit:
                 raise RPCException("Redirects limit exceeded")
             url = headers.get("location")
             self._logger.debug("Redirecting to %s", url)
             r = yield make_call(url, data, limit - 1)
             raise tornado.gen.Return(r)
         elif code in (598, 599):
             span.error_code = code
             self._logger.debug("Timed out")
             raise tornado.gen.Return(None)
         else:
             span.error_code = code
             raise RPCHTTPError("HTTP Error %s: %s" % (code, body))
Code example #9
File: service.py  Project: skripkar/noc
 def check_restore(self):
     if self.stopping:
         self.logger.info("Checking restore during stopping. Ignoring")
     else:
         code, headers, body = yield fetch(
             "http://%s/?user=%s&password=%s&database=%s&query=%s" % (
                 self.ch_address,
                 config.clickhouse.rw_user,
                 config.clickhouse.rw_password,
                 config.clickhouse.db,
                 "SELECT%20dummy%20FROM%20system.one"
             )
         )
         if code == 200:
             self.resume()
         else:
             self.restore_timeout = self.ioloop.add_timeout(
                 self.ioloop.time() + float(config.chwriter.suspend_timeout_ms) / 1000.0,
                 self.check_restore
             )