Example #1
 def poll_worker(self, community, oid, timeout, version):
     while True:
         a = yield self.queue.get()
         if a:
             for c in community:
                 t0 = perf_counter()
                 try:
                     r = yield snmp_get(
                         address=a, oids=oid, community=c, version=version, timeout=timeout
                     )
                     s = "OK"
                     dt = perf_counter() - t0
                     mc = c
                     break
                 except SNMPError as e:
                     s = "FAIL"
                     r = str(e)
                     dt = perf_counter() - t0
                     mc = ""
                 except Exception as e:
                     s = "EXCEPTION"
                     r = str(e)
                     dt = perf_counter() - t0
                     mc = ""
                     break
             if self.convert:
                 try:
                     r = MACAddressParameter().clean(r)
                 except ValueError:
                     pass
             self.stdout.write("%s,%s,%s,%s,%r\n" % (a, s, dt, mc, r))
         self.queue.task_done()
         if not a:
             break
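
The worker above exits its loop on a falsy queue item, so the producer side would typically enqueue one sentinel per worker. A minimal producer sketch under that assumption (hypothetical names, assuming a tornado.queues.Queue shared with the workers):

    import tornado.gen

    @tornado.gen.coroutine
    def feed_addresses(queue, addresses, n_workers):
        # Feed work items, then one falsy sentinel per worker so each
        # poll_worker() breaks out of its loop after calling task_done().
        for a in addresses:
            yield queue.put(a)
        for _ in range(n_workers):
            yield queue.put(None)
        yield queue.join()  # block until every item is marked done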
Example #2
 def _get(self, timeout):
     e = None
     endtime = None
     while True:
         with self.mutex:
             if self._qsize():
                 return self.queue.popleft()
             # Waiting lock
             if not e:
                 e = thread.allocate_lock()
                 e.acquire()
             self.waiters.insert(0, e)
         # Wait for condition or timeout
         t = perf_counter()
         if not endtime:
             endtime = t + timeout
         delay = 0.0005
         while True:
             ready = e.acquire(False)
             if ready:
                 break
             remaining = endtime - t
             if remaining <= 0.0:
                 try:
                     self.waiters.remove(e)
                 except ValueError:
                     pass
                 raise IdleTimeout()
             delay = min(delay * 2, remaining, 0.05)
             time.sleep(delay)
             t = perf_counter()
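
The inner loop polls the lock with exponential backoff: the delay doubles from 0.5 ms up to a 50 ms cap and is always clamped to the remaining timeout. A quick illustration of the resulting delay sequence (hypothetical remaining time of 1 s):

    delay, remaining, seq = 0.0005, 1.0, []
    for _ in range(8):
        delay = min(delay * 2, remaining, 0.05)
        seq.append(delay)
    # seq -> [0.001, 0.002, 0.004, 0.008, 0.016, 0.032, 0.05, 0.05]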
Example #3
 def apply_bulk_ops(self):
     if not self.bulk:
         return  # Nothing to apply
     t0 = perf_counter()
     with self.bulk_lock:
         try:
             r = self.collection.bulk_write(self.bulk)
             dt = perf_counter() - t0
             self.logger.info(
                 "%d bulk operations complete in %dms: "
                 "inserted=%d, updated=%d, removed=%d",
                 len(self.bulk),
                 int(dt * 1000),
                 r.inserted_count,
                 r.modified_count,
                 r.deleted_count,
             )
         except pymongo.errors.BulkWriteError as e:
             self.logger.error("Cannot apply bulk operations: %s [%s]",
                               e.details, e.code)
             metrics["%s_bulk_failed" % self.name] += 1
             return
         except Exception as e:
             self.logger.error("Cannot apply bulk operations: %s", e)
             metrics["%s_bulk_failed" % self.name] += 1
             return
         finally:
             self.bulk = []
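
A self-contained sketch of the same bulk_write pattern, assuming a reachable MongoDB instance and a throwaway collection (both hypothetical):

    import pymongo

    client = pymongo.MongoClient()
    coll = client.test.bulk_demo
    bulk = [
        pymongo.InsertOne({"x": 1}),
        pymongo.UpdateOne({"x": 1}, {"$set": {"y": 2}}),
        pymongo.DeleteOne({"x": 1}),
    ]
    r = coll.bulk_write(bulk)
    # The result object exposes the same counters logged above
    print(r.inserted_count, r.modified_count, r.deleted_count)  # 1 1 1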
Example #4
File: channel.py Project: skripkar/noc
 def __init__(self, service, fields, address, db):
     """
     :param fields: <table>.<field1>. .. .<fieldN>
     :return:
     """
     self.name = fields
     self.service = service
     self.address = address
     self.db = db
     if "|" in fields:
         # New format. Separated by '|'.
         # Nested fields are possible
         parts = tuple(fields.split("|"))
     else:
         # Old format. Separated by '.'.
         # Nested fields are not possible
         parts = tuple(fields.split("."))
     self.sql = "INSERT INTO %s(%s) FORMAT TabSeparated" % (
         parts[0], ",".join(parts[1:]))
     self.encoded_sql = urllib.quote(self.sql.encode('utf8'))
     self.n = 0
     self.data = []
     self.last_updated = perf_counter()
     self.last_flushed = perf_counter()
     self.flushing = False
     self.url = "http://%s/?user=%s&password=%s&database=%s&query=%s" % (
         address, config.clickhouse.rw_user, config.clickhouse.rw_password,
         db, self.encoded_sql)
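
A worked illustration of the SQL built from the new '|'-separated format (hypothetical field string):

    fields = "perf|date|ts|value"
    parts = tuple(fields.split("|"))
    sql = "INSERT INTO %s(%s) FORMAT TabSeparated" % (parts[0], ",".join(parts[1:]))
    assert sql == "INSERT INTO perf(date,ts,value) FORMAT TabSeparated"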
Example #5
    def __init__(self, service, table, address, db):
        """
        :param table: ClickHouse table name
        :param address: ClickHouse address
        :param db: ClickHouse database

        :return:
        """
        self.name = table
        self.service = service
        self.address = address
        self.db = db
        self.sql = "INSERT INTO %s FORMAT JSONEachRow" % table
        self.encoded_sql = urllib_quote(self.sql.encode("utf8"))
        self.n = 0
        self.data = []
        self.last_updated = perf_counter()
        self.last_flushed = perf_counter()
        self.flushing = False
        self.url = "http://%s/?user=%s&password=%s&database=%s&query=%s" % (
            address,
            config.clickhouse.rw_user,
            config.clickhouse.rw_password or "",
            db,
            self.encoded_sql,
        )
Example #6
File: service.py Project: skripkar/noc
 def flush_channel(self, channel):
     channel.start_flushing()
     n = channel.n
     data = channel.get_data()
     t0 = perf_counter()
     self.logger.debug("[%s] Sending %s records", channel.name, n)
     written = False
     suspended = False
     try:
         code, headers, body = yield fetch(
             channel.url,
             method="POST",
             body=data,
             user=config.clickhouse.rw_user,
             password=config.clickhouse.rw_password,
             content_encoding=config.clickhouse.encoding
         )
         if code == 200:
             self.logger.info(
                 "[%s] %d records sent in %.2fms",
                 channel.name,
                 n, (perf_counter() - t0) * 1000
             )
             metrics["records_written"] += n
             metrics["records_buffered"] -= n
             written = True
         elif code in self.CH_SUSPEND_ERRORS:
             self.logger.info(
                 "[%s] Timed out: %s",
                 channel.name, body
             )
             metrics["error", ("type", "records_spool_timeouts")] += 1
             suspended = True
         else:
             self.logger.info(
                 "[%s] Failed to write records: %s %s",
                 channel.name,
                 code, body
             )
             metrics["error", ("type", "records_spool_failed")] += 1
     except Exception as e:
         self.logger.error(
             "[%s] Failed to spool %d records due to unknown error: %s",
             channel.name, n, e
         )
     channel.stop_flushing()
     if not written:
         # Return data back to channel
         channel.feed(data)
         if suspended:
             self.suspend()
         else:
             self.requeue_channel(channel)
Example #7
File: ping.py Project: skripkar/noc
 def parse_reply(self, msg, addr):
     """
     Returns status, address, request_id, sequence, rtt
     """
     ip_header = msg[:20]
     (ver, tos, plen, pid, flags, ttl, proto, checksum, src_ip,
      dst_ip) = IPv4_STRUCT.unpack(ip_header)
     if proto != ICMPv4_PROTO:
         return
     icmp_header = msg[20:28]
     (icmp_type, icmp_code, icmp_checksum, req_id,
      seq) = ICMP_STRUCT.unpack(icmp_header)
     if icmp_type == ICMPv4_ECHOREPLY:
         rtt = None
         if len(msg) > 36:
             t0 = TS_STRUCT.unpack(msg[28:36])[0]
             rtt = perf_counter() - t0
         return True, addr, req_id, seq, rtt
     elif icmp_type in (ICMPv4_UNREACHABLE, ICMPv4_TTL_EXCEEDED):
         if plen >= 48:
             _, _, _, _, _, _, o_proto, _, o_src_ip, o_dst_ip = IPv4_STRUCT.unpack(
                 msg[28:48])
             if o_proto == ICMPv4_PROTO:
                 o_icmp_type, _, _, o_req_id, _ = ICMP_STRUCT.unpack(
                     msg[48:56])
                 if o_icmp_type == ICMPv4_ECHO:
                     return False, addr, req_id, seq, None
     return None, None, None, None, None
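
The struct constants are not shown in this snippet; layouts consistent with the unpack calls above would look like the following (an assumption, not the project's actual definitions):

    import struct

    IPv4_STRUCT = struct.Struct("!BBHHHBBHII")  # 20-byte IPv4 header, 10 fields
    ICMP_STRUCT = struct.Struct("!BBHHH")       # 8 bytes: type, code, checksum, req_id, seq
    TS_STRUCT = struct.Struct("!d")             # 8-byte float: perf_counter() at send time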
Example #8
File: topic.py Project: ewwwcha/noc
    def iter_get(self, n=1, size=None, total_overhead=0, message_overhead=0):
        """
        Get up to `n` items up to `size` size.

        Warning queue will be locked until the end of function call.

        :param n: Amount of items returned
        :param size: None - unlimited, integer - upper size limit
        :param total_overhead: Adjust total size to `total_overhead` octets.
        :param message_overhead: Adjust total size to `message_overhead` per each returned message.
        :return: Yields items
        """
        total = 0
        if size and total_overhead:
            total += total_overhead
        with self.lock:
            for _i in range(n):
                try:
                    msg = self.queue.popleft()
                    m_size = len(msg)
                    total += m_size
                    if size and message_overhead:
                        total += message_overhead
                    if size and total > size:
                        # Size limit exceeded. Return message to queue
                        self.queue.appendleft(msg)
                        break
                    self.queue_size -= m_size
                    self.msg_get += 1
                    self.msg_get_size += m_size
                    yield msg
                except IndexError:
                    break
        self.last_get = perf_counter()
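
A hypothetical usage sketch, assuming `q` is such a queue instance: drain up to 100 messages but at most 1 MB, charging 16 octets of fixed framing plus 4 octets per message:

    batch = list(q.iter_get(n=100, size=1048576, total_overhead=16, message_overhead=4))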
Example #9
File: channel.py Project: skripkar/noc
 def is_expired(self):
     if self.n:
         return False
     t = perf_counter()
     if self.data or self.flushing:
         return False
     return t - self.last_updated > config.chwriter.channel_expire_interval
Example #10
File: span.py Project: 0pt1on/noc
 def __enter__(self):
     if config.features.forensic:
         forensic_logger.info(
             "[>%s|%s|%s] %s", self.forensic_id, self.server, self.service, self.in_label
         )
     if self.is_sampled or self.hist or self.quantile:
         self.ts0 = perf_counter()
     if not self.is_sampled:
         return self
     # Generate span ID
     self.span_id = struct.unpack("!Q", os.urandom(8))[0] & 0x7FFFFFFFFFFFFFFF
     # Get span context
     try:
         self.span_context = tls.span_context
         # Get parent
         try:
             self.span_parent = tls.span_parent
             if self.parent == DEFAULT_ID:
                 self.parent = self.span_parent
         except AttributeError:
             pass
     except AttributeError:
         self.span_context = self.context if self.context else self.span_id
         tls.span_context = self.span_context
     tls.span_parent = self.span_id
     self.start = time.time()
     return self
Example #11
File: topic.py Project: ewwwcha/noc
 def wait_async(self, timeout=None, rate=None):
     """
     Block and wait up to `timeout`
     :param timeout: Max. wait in seconds
     :param rate: Max. rate of publishing in messages per second
     :return:
     """
     # Sleep to throttle rate
     if rate and self.last_get:
         now = perf_counter()
         delta = max(self.last_get + 1.0 / rate - now, 0)
         if delta > 0:
             yield tornado.gen.sleep(delta)
             # Adjust remaining timeout
             if timeout:
                 # Adjust timeout
                 timeout -= delta
                 if timeout <= 0:
                     # Timeout expired
                     raise tornado.gen.Return()
     # Check if queue already contains messages
     if not self.queue_size:
         # No messages, wait
         if timeout is not None:
             timeout = datetime.timedelta(seconds=timeout)
         yield self.put_async_condition.wait(timeout)
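
The throttling arithmetic can be read in isolation: the next publish may happen no earlier than `last_get + 1 / rate`. A standalone sketch of the same computation (hypothetical helper, not part of the source):

    from time import perf_counter

    def throttle_delay(last_get, rate):
        """Seconds to sleep so publishing stays below `rate` messages/sec."""
        return max(last_get + 1.0 / rate - perf_counter(), 0.0)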
Example #12
File: channel.py Project: skripkar/noc
 def is_ready(self):
     if not self.data or self.flushing:
         return False
     if self.n >= config.chwriter.batch_size:
         return True
     t = perf_counter()
     return (t - self.last_flushed) * 1000 >= config.chwriter.batch_delay_ms
Example #13
 def scheduler_loop(self):
     """
     Primary scheduler loop
     """
     while not self.to_shutdown:
         t0 = perf_counter()
         n = 0
         if self.get_executor().may_submit():
             try:
                 n = yield self.executor.submit(self.scheduler_tick)
             except Exception as e:
                 self.logger.error("Failed to execute scheduler tick: %s", e)
         dt = self.check_time - (perf_counter() - t0) * 1000
         if dt > 0:
             if n:
                 dt = min(dt, self.check_time / n)
             yield tornado.gen.sleep(dt / 1000.0)
     self.apply_ops()
Example #14
 def run(self):
     """
     Run script
     """
     with Span(server="activator",
               service=self.name,
               in_label=self.credentials.get("address")):
         self.start_time = perf_counter()
         self.logger.debug("Running. Input arguments: %s, timeout %s",
                           self.args, self.timeout)
         # Use cached result when available
         cache_hit = False
         if self.cache and self.parent:
             try:
                 result = self.get_cache(self.name, self.args)
                 self.logger.info("Using cached result")
                 cache_hit = True
             except KeyError:
                 pass
         # Execute script
         if not cache_hit:
             try:
                 result = self.execute(**self.args)
                 if self.cache and self.parent and result:
                     self.logger.info("Caching result")
                     self.set_cache(self.name, self.args, result)
             finally:
                 if not self.parent:
                     # Close SNMP socket when necessary
                     self.close_snmp()
                     # Close CLI socket when necessary
                     self.close_cli_stream()
                     # Close MML socket when necessary
                     self.close_mml_stream()
                     # Close RTSP socket when necessary
                     self.close_rtsp_stream()
                     # Close HTTP Client
                     self.http.close()
         # Clean result
         result = self.clean_output(result)
         self.logger.debug("Result: %s", result)
         runtime = perf_counter() - self.start_time
         self.logger.info("Complete (%.2fms)", runtime * 1000)
     return result
Example #15
 def report(self):
     nm = metrics["records_written"].value
     t = perf_counter()
     if self.last_ts:
         speed = float(nm - self.last_metrics) / (t - self.last_ts)
         self.logger.info(
             "Feeding speed: %.2frecords/sec, active channels: %s, buffered records: %d",
             speed,
             metrics["channels_active"].value,
             metrics["records_buffered"].value,
         )
     self.last_metrics = nm
     self.last_ts = t
Example #16
    def __exit__(self, exc_type, exc_val, exc_tb):
        def q_tsv(s):
            if not s:
                return ""
            else:
                return str(s).encode("string_escape")

        global spans
        if self.is_sampled or self.hist or self.quantile:
            self.duration = int((perf_counter() - self.ts0) * US)
        if self.hist:
            self.hist.register(self.duration)
        if self.quantile:
            self.quantile.register(self.duration)
        if config.features.forensic and hasattr(self, "forensic_id"):
            # N.B. config.features.forensic may be changed during span
            forensic_logger.info("[<%s]", self.forensic_id)
        if not self.is_sampled:
            return
        if exc_type and not self.error_text and not self.is_ignorable_error(exc_type):
            self.error_code = ERR_UNKNOWN
            self.error_text = str(exc_val).strip("\t").replace("\t", " ").replace("\n", " ")
        lt = time.localtime(self.start)
        row = "\t".join(
            str(x) for x in [
                time.strftime("%Y-%m-%d", lt),
                time.strftime("%Y-%m-%d %H:%M:%S", lt),
                self.span_context,
                self.span_id,
                self.parent,
                q_tsv(self.server),
                q_tsv(self.service),
                q_tsv(self.client),
                self.duration,
                self.error_code or 0,
                q_tsv(self.error_text),
                self.sample,
                ch_escape(q_tsv(self.in_label)),
                ch_escape(q_tsv(self.out_label)),
            ])
        with span_lock:
            spans += [row]
        if self.span_parent == DEFAULT_ID:
            del tls.span_parent
            del tls.span_context
        else:
            tls.span_parent = self.span_parent
        metrics["spans"] += 1
Example #17
 def apply_metrics(self, d):
     """
     Append threadpool metrics to dictionary d
     :param d:
     :return:
     """
     with self.mutex:
         workers = len(self.threads)
         idle = len(self.waiters)
         d.update({
             "%s_max_workers" % self.name: self.max_workers,
             "%s_workers" % self.name: workers,
             "%s_idle_workers" % self.name: idle,
             "%s_running_workers" % self.name: workers - idle,
             "%s_submitted_tasks" % self.name: self.submitted_tasks,
             "%s_queued_jobs" % self.name: len(self.queue),
             "%s_uptime" % self.name: perf_counter() - self.started
         })
Example #18
 def _maybe_rotate(self):
     """
     Rotate and flush slots when necessary
     :return:
     """
     t = perf_counter()
     if not self.next_shift:
         # First call
         self.next_shift = t + self.ttl
         return
     if self.next_shift > t:
         return  # Not expired yet
     delta = t - self.next_shift
     self.next_shift = t + math.ceil(delta / self.ttl) * self.ttl
     to_rotate = min(len(self.slots), int(math.ceil(delta / self.ttl)))
     for _ in range(to_rotate):
         self.slots.appendleft(self.slots.pop())
         self.slots[0].reset()
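
A worked illustration of the rotation arithmetic (hypothetical numbers): with ttl = 60 s, waking 130 s past `next_shift` gives delta = 130, so up to three slots are rotated and `next_shift` advances by 3 * ttl:

    import math

    ttl, delta = 60.0, 130.0
    assert math.ceil(delta / ttl) == 3  # slots to rotate, capped by len(self.slots)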
Example #19
File: span.py Project: gabrielat/noc
 def __exit__(self, exc_type, exc_val, exc_tb):
     global spans
     if self.is_sampled or self.hist or self.quantile:
         self.duration = int((perf_counter() - self.ts0) * US)
     if self.hist:
         self.hist.register(self.duration)
     if self.quantile:
         self.quantile.register(self.duration)
     if config.features.forensic and hasattr(self, "forensic_id"):
         # N.B. config.features.forensic may be changed during span
         forensic_logger.info("[<%s]", self.forensic_id)
     if not self.is_sampled:
         return
     if exc_type and not self.error_text and not self.is_ignorable_error(exc_type):
         self.error_code = ERR_UNKNOWN
         self.error_text = str(exc_val).strip("\t").replace("\t", " ").replace("\n", " ")
     lt = time.localtime(self.start)
     ft = time.strftime("%Y-%m-%d %H:%M:%S", lt)
     span = SpanItem(
         date=ft.split(" ")[0],
         ts=ft,
         ctx=self.span_context,
         id=self.span_id,
         parent=self.parent,
         server=str(self.server or ""),
         service=str(self.service or ""),
         client=str(self.client or ""),
         duration=self.duration,
         error_code=self.error_code or 0,
         error_text=str(self.error_text or ""),
         sample=self.sample,
         in_label=str(self.in_label or ""),
         out_label=str(self.out_label or ""),
     )
     with span_lock:
         spans += [span]
     if self.span_parent == DEFAULT_ID:
         del tls.span_parent
         del tls.span_context
     else:
         tls.span_parent = self.span_parent
     metrics["spans"] += 1
     if self.suppress_trace:
         return True
Example #20
 def report(self):
     t = perf_counter()
     if self.last_ts:
         r = []
         for m in CR:
             ov = self.stats.get(m, 0)
             nv = metrics[m].value
             r += ["%s: %d" % (m[7:], nv - ov)]
             self.stats[m] = nv
         nt = metrics[CR_PROCESSED].value
         ot = self.stats.get(CR_PROCESSED, 0)
         total = nt - ot
         self.stats[CR_PROCESSED] = nt
         dt = (t - self.last_ts)
         if total:
             speed = total / dt
             self.logger.info("REPORT: %d events in %.2fms. %.2fev/s (%s)" %
                              (total, dt * 1000, speed, ", ".join(r)))
     self.last_ts = t
Example #21
 def __init__(self, max_workers, idle_timeout=DEFAULT_IDLE_TIMEOUT,
              shutdown_timeout=DEFAULT_SHUTDOWN_TIMEOUT,
              name=None):
     self.max_workers = max_workers
     self.threads = set()
     self.mutex = threading.Lock()
     self.queue = deque()
     self.to_shutdown = False
     self.idle_timeout = idle_timeout or None
     self.shutdown_timeout = shutdown_timeout or None
     self.submitted_tasks = 0
     self.worker_id = itertools.count()
     self.name = name or "threadpool"
     self.done_event = None
     self.done_future = None
     self.started = perf_counter()
     self.waiters = []
     if config.thread_stack_size:
         threading.stack_size(config.thread_stack_size)
Example #22
 def get_mon_data(self):
     """
     Returns monitoring data
     """
     r = {
         "status": self.get_mon_status(),
         "service": self.name,
         "instance": str(self.service_id),
         "node": config.node,
         "pid": self.pid,
         # Current process uptime
         "uptime": perf_counter() - self.start_time
     }
     if self.pooled:
         r["pool"] = config.pool
     if self.executors:
         for x in self.executors:
             self.executors[x].apply_metrics(r)
     apply_metrics(r)
     return r
Example #23
File: base.py Project: ewwwcha/noc
 def __init__(self):
     set_service(self)
     sys.excepthook = excepthook
     self.ioloop = None
     self.logger = None
     self.service_id = str(uuid.uuid4())
     self.executors = {}
     self.start_time = perf_counter()
     self.pid = os.getpid()
     self.nsq_readers = {}  # handler -> Reader
     self.nsq_writer = None
     self.startup_ts = None
     # channel, fields -> data
     self._metrics = defaultdict(list)
     self.metrics_lock = threading.Lock()
     self.metrics_callback = None
     self.dcs = None
     # Effective address and port
     self.server = None
     self.address = None
     self.port = None
     self.is_active = False
     self.close_callbacks = []
     # Can be initialized in subclasses
     self.scheduler = None
     # Depends on config
     topo = config.get_ch_topology_type()
     if topo == CH_UNCLUSTERED:
         self.register_metrics = self.register_unclustered_metrics
     elif topo == CH_REPLICATED:
         self.register_metrics = self.register_replicated_metrics
     elif topo == CH_SHARDED:
         self.register_metrics = self.register_sharded_metrics
         self.total_weight = 0
         self.get_shard = self.get_sharding_function()
     else:
         self.die("Invalid ClickHouse cluster topology")
     # NSQ Topics
     self.topic_queues = {}  # name -> TopicQueue()
     self.topic_queue_lock = threading.Lock()
     self.topic_shutdown = {}  # name -> event
Example #24
File: ping.py Project: skripkar/noc
 def parse_reply(self, msg, addr):
     """
     Returns status, address, request_id, sequence, rtt
     """
     # (ver_tc_flow, plen, hdr, ttl) = struct.unpack("!IHBB", msg[:8])
     # src_ip = msg[64: 192]
     # @todo: Access IPv6 header
     # src_ip = None
     # ttl = None
     # icmp_header = msg[40:48]
     icmp_header = msg[:8]
     (icmp_type, icmp_code, icmp_checksum, req_id,
      seq) = ICMP_STRUCT.unpack(icmp_header)
     payload = msg[8:]
     rtt = None
     if len(payload) >= 8:
         t0 = TS_STRUCT.unpack(payload[:8])[0]
         rtt = perf_counter() - t0
     if icmp_type == ICMPv6_ECHOREPLY:
         return True, addr, req_id, seq, rtt
     else:
         return None, None, None, None, None
Example #25
def test_perf_counter():
    DELTA = 1.0
    t0 = perf_counter()
    time.sleep(DELTA)
    t1 = perf_counter()
    assert t1 - t0 >= DELTA
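
The test relies on perf_counter() being a monotonic, high-resolution clock: its absolute value is arbitrary, so only the difference between two readings is meaningful:

    from time import perf_counter, sleep

    t0 = perf_counter()
    sleep(0.25)
    elapsed = perf_counter() - t0  # ~0.25 s, immune to wall-clock adjustments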
Example #26
 def check_timer(self, name):
     t = perf_counter()
     yield
     self.check_timings += [(name, perf_counter() - t)]
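
The generator above is presumably wrapped with contextlib.contextmanager elsewhere in the class; a self-contained sketch under that assumption (class name is hypothetical):

    from contextlib import contextmanager
    from time import perf_counter

    class Checker(object):
        def __init__(self):
            self.check_timings = []

        @contextmanager
        def check_timer(self, name):
            t = perf_counter()
            yield
            self.check_timings += [(name, perf_counter() - t)]

    c = Checker()
    with c.check_timer("dns"):
        pass  # timed block goes here
    # c.check_timings == [("dns", <elapsed seconds>)]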
Example #27
    def extract(self):
        def q(s):
            if s == "" or s is None:
                return ""
            elif isinstance(s, six.text_type):
                return s.encode("utf-8")
            else:
                return str(s)

        # Fetch data
        self.logger.info("Extracting %s from %s", self.name, self.system.name)
        t0 = perf_counter()
        data = []
        n = 0
        seen = set()
        for row in self.iter_data():
            if not self.filter(row):
                continue
            row = self.clean(row)
            if row[0] in seen:
                if not self.suppress_deduplication_log:
                    self.logger.error("Duplicated row truncated: %r", row)
                continue
            else:
                seen.add(row[0])
            data += [[q(x) for x in row]]
            n += 1
            if n % self.REPORT_INTERVAL == 0:
                self.logger.info("   ... %d records", n)
        dt = perf_counter() - t0
        speed = n / dt
        self.logger.info("%d records extracted in %.2fs (%d records/s)", n, dt,
                         speed)
        # Sort
        data.sort()
        # Write
        f = self.get_new_state()
        writer = csv.writer(f)
        writer.writerows(data)
        f.close()
        if self.fatal_problems or self.quality_problems:
            self.logger.warning(
                "Detect problems on extracting, fatal: %d, quality: %d",
                len(self.fatal_problems),
                len(self.quality_problems),
            )
            self.logger.warning("Line num\tType\tProblem string")
            for p in self.fatal_problems:
                self.logger.warning(
                    "Fatal problem, line was rejected: %s\t%s\t%s" %
                    (p.line, p.p_class, p.message))
            for p in self.quality_problems:
                self.logger.warning(
                    "Data quality problem in line:  %s\t%s\t%s" %
                    (p.line, p.p_class, p.message))
            # Dump problem to file
            try:
                f = self.get_problem_file()
                writer = csv.writer(f, delimiter=";")
                for p in itertools.chain(self.quality_problems,
                                         self.fatal_problems):
                    writer.writerow(
                        [str(c).encode("utf-8") for c in p.row]
                        + ["Fatal problem, line was rejected" if p.is_rej
                           else "Data quality problem"]
                        + [p.message.encode("utf-8")])
            except IOError as e:
                self.logger.error("Error when saved problems %s", e)
            finally:
                f.close()
        else:
            self.logger.info("No problems detected")
Example #28
File: job.py Project: skripkar/noc
    def run(self):
        with Span(server=self.scheduler.name,
                  service=self.attrs[self.ATTR_CLASS],
                  sample=self.attrs.get(self.ATTR_SAMPLE, 0),
                  in_label=self.attrs.get(self.ATTR_KEY, "")):
            self.start_time = perf_counter()
            if self.is_retries_exceeded():
                self.logger.info("[%s|%s] Retries exceeded. Remove job",
                                 self.name, self.attrs[Job.ATTR_ID])
                self.remove_job()
                return
            self.logger.info(
                "[%s] Starting at %s (Lag %.2fms)", self.name,
                self.scheduler.scheduler_id,
                total_seconds(datetime.datetime.now() -
                              self.attrs[self.ATTR_TS]) * 1000.0)
            # Run handler
            status = self.E_EXCEPTION
            delay = None
            with Span(service="job.dereference"):
                try:
                    ds = self.dereference()
                    can_run = self.can_run()
                except Exception as e:
                    self.logger.error("Unknown error during dereference: %s",
                                      e)
                    ds = None
                    can_run = False

            if ds:
                with Span(service="job.run"):
                    if can_run:
                        try:
                            data = self.attrs.get(self.ATTR_DATA) or {}
                            result = self.handler(**data)
                            if tornado.gen.is_future(result):
                                # Wait for future
                                result = yield result
                            status = self.E_SUCCESS
                        except RetryAfter as e:
                            self.logger.info("Retry after %ss: %s", e.delay, e)
                            status = self.E_RETRY
                            delay = e.delay
                        except self.failed_exceptions:
                            status = self.E_FAILED
                        except Exception:
                            error_report()
                            status = self.E_EXCEPTION
                    else:
                        self.logger.info("Deferred")
                        status = self.E_DEFERRED
            elif ds is not None:
                self.logger.info("Cannot dereference")
                status = self.E_DEREFERENCE
            self.duration = perf_counter() - self.start_time
            self.logger.info("Completed. Status: %s (%.2fms)",
                             self.STATUS_MAP.get(status, status),
                             self.duration * 1000)
            # Schedule next run
            if delay is None:
                with Span(service="job.schedule_next"):
                    self.schedule_next(status)
            else:
                with Span(service="job.schedule_retry"):
                    # Retry
                    if self.context_version:
                        ctx = self.context or None
                        ctx_key = self.get_context_cache_key()
                    else:
                        ctx = None
                        ctx_key = None
                    self.scheduler.set_next_run(
                        self.attrs[self.ATTR_ID],
                        status=status,
                        ts=datetime.datetime.now() +
                        datetime.timedelta(seconds=delay),
                        duration=self.duration,
                        context_version=self.context_version,
                        context=ctx,
                        context_key=ctx_key)
Example #29
File: model.py Project: skripkar/noc
 def query(cls, query, user=None, dry_run=False):
     """
     Execute query and return result
     :param query: dict of
         "fields": list of dicts
             *expr -- field expression
             *alias -- resulting name
             *group -- nth field in GROUP BY expression, starting from 0
             *order -- nth field in ORDER BY expression, starting from 0
             *desc -- sort in descending order, if true
         "filter": expression
         "having": expression
         "limit": N -- limit to N rows
         "offset": N -- skip first N rows
         "sample": 0.0-1.0 -- randomly select rows
         @todo: group by
         @todo: order by
     :param user: User doing query
     :param dry_run: Do not execute the query; return the generated SQL instead.
     :return:
     """
     # Get field expressions
     fields = query.get("fields", [])
     if not fields:
         return None
     transformed_query = cls.transform_query(query, user)
     fields_x = []
     aliases = []
     group_by = {}
     order_by = {}
     for i, f in enumerate(fields):
         if isinstance(f["expr"], six.string_types):
             default_alias = f["expr"]
             f["expr"] = {"$field": f["expr"]}
         else:
             default_alias = "f%04d" % i
         alias = f.get("alias", default_alias)
         if not f.get("hide"):
             aliases += [alias]
             fields_x += ["%s AS %s" % (to_sql(f["expr"], cls), escape_field(alias))]
         if "group" in f:
             group_by[int(f["group"])] = alias
         if "order" in f:
             if "desc" in f and f["desc"]:
                 order_by[int(f["order"])] = "%s DESC" % alias
             else:
                 order_by[int(f["order"])] = alias
     if transformed_query is None:
         # Access denied
         r = []
         dt = 0.0
         sql = ["SELECT %s FROM %s WHERE 0 = 1" % (", ".join(fields_x), cls._get_db_table())]
     else:
         # Get where expressions
         filter_x = to_sql(transformed_query.get("filter", {}))
         filter_h = to_sql(transformed_query.get("having", {}))
         # Generate SQL
         sql = ["SELECT "]
         sql += [", ".join(fields_x)]
         sql += ["FROM %s" % cls._get_db_table()]
         sample = query.get("sample")
         if sample:
             sql += ["SAMPLE %s" % float(sample)]
         if filter_x:
             sql += ["WHERE %s" % filter_x]
         # GROUP BY
         if group_by:
             sql += ["GROUP BY %s" % ", ".join(group_by[v] for v in sorted(group_by))]
         # HAVING
         if filter_h:
             sql += ["HAVING %s" % filter_h]
         # ORDER BY
         if order_by:
             sql += ["ORDER BY %s" % ", ".join(order_by[v] for v in sorted(order_by))]
         # LIMIT
         if "limit" in query:
             if "offset" in query:
                 sql += ["LIMIT %d, %d" % (query["offset"], query["limit"])]
             else:
                 sql += ["LIMIT %d" % query["limit"]]
         sql = " ".join(sql)
         # Execute query
         ch = connection()
         t0 = perf_counter()
         if dry_run:
             return sql
         r = ch.execute(sql)
         dt = perf_counter() - t0
     return {
         "fields": aliases,
         "result": r,
         "duration": dt,
         "sql": sql
     }
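
A hypothetical dry-run call (model name assumed; `query` is presumably a classmethod): build the SQL without touching ClickHouse:

    sql = MyModel.query(
        {"fields": [{"expr": "count()", "alias": "n"}], "limit": 10},
        dry_run=True,
    )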
Example #30
 def handler(self):
     # type: () -> Tuple[int, Dict]
     # Decode request
     try:
         req = ujson.loads(self.request.body)
     except ValueError:
         return 400, {"status": False, "error": "Cannot decode JSON"}
     # Validate
     try:
         req = Request.clean(req)
     except ValueError as e:
         return 400, {"status": False, "error": "Bad request: %s" % e}
     # Find start of path
     try:
         with Span(in_label="start_of_path"):
             start, start_iface = self.get_object_and_interface(
                 **req["from"])
     except ValueError as e:
         return 404, {
             "status": False,
             "error": "Failed to find start of path: %s" % e
         }
     # Find end of path
     if "level" in req["to"]:
         goal = ManagedObjectLevelGoal(req["to"]["level"])
         end_iface = None
     else:
         try:
             with Span(in_label="end_of_path"):
                 end, end_iface = self.get_object_and_interface(**req["to"])
             goal = ManagedObjectGoal(end)
         except ValueError as e:
             return 404, {
                 "status": False,
                 "error": "Failed to find end of path: %s" % e
             }
     # Trace the path
     if req.get("config"):
         max_depth = req["config"]["max_depth"]
         n_shortest = req["config"]["n_shortest"]
     else:
         max_depth = MAX_DEPTH_DEFAULT
         n_shortest = N_SHORTEST_DEFAULT
     error = None
     with Span(in_label="find_path"):
         t0 = perf_counter()
         try:
             paths = list(
                 self.iter_paths(
                     start,
                     start_iface,
                     goal,
                     end_iface,
                     constraints=self.get_constraints(
                         start, start_iface, req.get("constraints")),
                     max_depth=max_depth,
                     n_shortest=n_shortest,
                 ))
         except ValueError as e:
             error = str(e)
         dt = perf_counter() - t0
     if error:
         return 404, {"status": False, "error": error, "time": dt}
     return 200, {"status": True, "paths": paths, "time": dt}