Example #1
def ensure_all_pm_scopes():
    from noc.core.clickhouse.connect import connection

    if not config.clickhouse.cluster or config.clickhouse.cluster_topology == "1":
        # Standalone configuration
        ensure_pm_scopes()
        return
    # Replicated configuration
    ch = connection(read_only=False)
    for host, port in ch.execute("SELECT host_address, port FROM system.clusters WHERE cluster = %s",
                                 args=[config.clickhouse.cluster]):
        c = connection(host=host, port=port, read_only=False)
        ensure_pm_scopes(c)
Example #2
    def get_object_metrics(start, stop):
        """

        :param start:
        :type stop: datetime.datetime
        :param stop:
        :type stop: datetime.datetime
        :return:
        """
        r = {}
        bi_map = {
            str(bi_id): mo_id for mo_id, bi_id in ManagedObject.objects.values_list("id", "bi_id")
        }
        ch = connection()
        for row in ch.execute(
            "SELECT managed_object, sum(stp_topology_changes_delta) "
            "FROM routing WHERE ts > '%s' and ts < '%s' GROUP BY managed_object"
            % (
                start.replace(microsecond=0).isoformat(sep=" "),
                stop.replace(microsecond=0).isoformat(sep=" "),
            )
        ):  # delta
            r[bi_map[row[0]]] = {"n_stp_topo_changes": row[1]}
        del bi_map
        return r
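
A minimal usage sketch for the helper above; the surrounding setup is hypothetical and only assumes a NOC environment where get_object_metrics is reachable:

import datetime

stop = datetime.datetime.now().replace(microsecond=0)
start = stop - datetime.timedelta(hours=1)
# Maps ManagedObject id -> {"n_stp_topo_changes": <count>} for the last hour
metrics = get_object_metrics(start, stop)
for mo_id, data in metrics.items():
    print(mo_id, data["n_stp_topo_changes"])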
Example #3
 def connect(self):
     """
     Connect to the database and store the client
     :return:
     """
     # Note: this rebinds self.connect from the method to the connection object
     self.connect = connection(host=self.host,
                               port=self.port,
                               read_only=False)
Example #4
    def ensure_table(self, connect=None):
        """
        Ensure table is exists
        :return: True, if table has been changed
        """
        from noc.core.clickhouse.connect import connection

        def ensure_columns(table_name):
            c = False
            # Alter when necessary
            existing = {}
            for name, type in ch.execute(
                    """
                SELECT name, type
                FROM system.columns
                WHERE
                  database=%s
                  AND table=%s
                """,
                [config.clickhouse.db, table_name],
            ):
                existing[name] = type
            after = None
            for f, t in self.iter_fields():
                if f not in existing:
                    ch.execute(
                        post="ALTER TABLE %s ADD COLUMN %s %s AFTER %s" %
                        (table_name, f, t, after))
                    c = True
                after = f
                if f in existing and existing[f] != t:
                    print("Warning! Type mismatch for column %s: %s <> %s" %
                          (f, existing[f], t))
                    print(
                        "Set command manually: ALTER TABLE %s MODIFY COLUMN %s %s"
                        % (table_name, f, t))
            return c

        changed = False
        ch = connect or connection(read_only=False)
        if not ch.has_table(self._get_raw_db_table()):
            # Create new table
            ch.execute(post=self.get_create_sql())
            changed = True
        else:
            changed |= ensure_columns(self._get_raw_db_table())
        # Check for distributed table
        if config.clickhouse.cluster:
            if not ch.has_table(self.table_name):
                ch.execute(post=self.get_create_distributed_sql())
                changed = True
            else:
                changed |= ensure_columns(self.table_name)
        return changed
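
A hedged usage sketch for ensure_table as defined above; `MyModel` is an illustrative placeholder for whatever object exposes this method:

from noc.core.clickhouse.connect import connection

ch = connection(read_only=False)  # read-write connection, as in the method body
if MyModel().ensure_table(connect=ch):
    print("ClickHouse schema was changed")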
Example #5
    def do_query_ch(self, moss, query_map, f_date, to_date):
        n = 0
        client = connection()

        mos_name = sorted(moss)
        query = self.get_query_ch(query_map, f_date, to_date)
        self.CHUNK_SIZE = 4000
        while mos_name:
            mos_name, m_r = mos_name[self.CHUNK_SIZE:], mos_name[:self.CHUNK_SIZE]
            for row in client.execute(query % ", ".join(m_r)):
                yield row[0:2], row[2:]
            n += 1
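
The slicing in the loop above consumes the sorted object names in fixed-size batches; a standalone sketch of the same chunking pattern with illustrative names:

CHUNK_SIZE = 4000

def iter_chunks(items, size=CHUNK_SIZE):
    # Yield consecutive slices of at most `size` elements
    while items:
        items, chunk = items[size:], items[:size]
        yield chunk

# list(iter_chunks(list(range(10)), size=4)) -> [[0, 1, 2, 3], [4, 5, 6, 7], [8, 9]]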
Example #6
 def __init__(self, mos_ids, f_date, to_date, columns=None):
     super(ReportMetrics, self).__init__(mos_ids)
     self.from_date = f_date
     self.to_date = to_date
     self.ch_client = connection()
     if not (self.TABLE_NAME and self.SELECT_QUERY_MAP):
         raise NotImplementedError
     if columns and isinstance(columns, list):
         for c in set(self.ATTRS) - set(columns):
             self.ATTRS.pop(c)
     elif columns and isinstance(columns, OrderedDict):
         self.ATTRS = columns
     self.unknown_value = ([
         [""] * (len(self.SELECT_QUERY_MAP) + len(self.KEY_FIELDS))
     ], )
Example #7
 def handle_apply(self,
                  host=None,
                  port=None,
                  dry_run=True,
                  *args,
                  **options):
     connect()
     read_only = dry_run
     ch = connection(host, port, read_only=read_only)
     today = datetime.date.today()
     # Get partitions
     parts = self.get_parts(ch)
     #
     partition_claimed = []
     claimed_bytes = 0
     for p in CHPolicy.objects.filter(is_active=True).order_by("table"):
         table_claimed = 0
         if not p.ttl:
             continue  # Disabled
         deadline = today - datetime.timedelta(days=p.ttl)
         is_dry = dry_run or p.dry_run
         self.print("# Table %s deadline %s%s" %
                    (p.table, deadline.isoformat(),
                     " (Dry Run)" if is_dry else ""))
         for pi in parts[p.table]:
             if pi.max_date >= deadline:
                 continue
             self.print(
                 "  Removing partition %s (%s -- %s, %d rows, %d bytes)" %
                 (pi.partition, pi.min_date, pi.max_date, pi.rows,
                  pi.bytes))
             table_claimed += pi.bytes
             if not is_dry:
                 partition_claimed += [(p.table, pi.partition)]
         self.print("  Total %d bytes to be reclaimed" % table_claimed)
         claimed_bytes += table_claimed
     if partition_claimed:
         self.print("Claimed data will be Loss..\n")
         for i in reversed(range(1, 10)):
             self.print("%d\n" % i)
             time.sleep(1)
         for c in partition_claimed:
             table, part = c[0], c[1]
             if table.startswith(".inner"):
                 table = table[7:]
             ch.execute("ALTER TABLE %s.%s DROP PARTITION '%s'" %
                        (config.clickhouse.db, table, part))
         self.print("# Done. %d bytes to be reclaimed" % claimed_bytes)
Example #8
 def get_data(self):
     # Get span data
     ch = connection()
     data = [
         Span(*r) for r in ch.execute(
             """
           SELECT
             ts, id, parent, server, service, client,
             duration, sample, error_code,
             error_text, in_label, out_label
           FROM span
           WHERE ctx = %s""",
             [int(self.id)],
         )
     ]
     # Build hierarchy
     smap = {s.id: s for s in data}
     root = None
     for s in data:
         if s.parent:
             smap[s.parent].children += [s]
         else:
             root = s
     # Set width
     for s in data:
         if s.parent:
             d = s.ts - root.ts
             dt = d.seconds * 1000000 + d.microseconds
             s.left = self.GRAPH_WIDTH * dt // root.duration
             s.width = int(
                 float(self.GRAPH_WIDTH) /
                 (float(root.duration) / float(s.duration)))
         else:
             s.left = 0
             s.width = self.GRAPH_WIDTH
     # Flatten
     spans = self.flatten_spans(root)
     #
     return {"context": int(self.id), "root": root, "spans": spans}
Example #9
    def get_data(self,
                 request,
                 interval=1,
                 repo_format=0,
                 from_date=None,
                 to_date=None,
                 **kwargs):
        # Date Time Block
        if from_date:
            from_date = datetime.datetime.strptime(from_date, "%d.%m.%Y")
        elif interval:
            from_date = datetime.datetime.now() - datetime.timedelta(
                days=int(interval))
        else:
            from_date = datetime.datetime.now() - datetime.timedelta(days=1)

        if to_date:
            to_date = datetime.datetime.strptime(to_date, "%d.%m.%Y")
            if from_date == to_date:
                to_date = from_date + datetime.timedelta(days=1)
        elif interval:
            to_date = from_date + datetime.timedelta(days=int(interval))
        else:
            to_date = from_date + datetime.timedelta(days=1)
        columns = [
            _("Server"),
            _("Service"),
            _("Request count"),
            _("Success request count"),
            _("Failed request count"),
            _("Success request (%)"),
            _("Q1 (ms)"),
            _("Q2 (ms)"),
            _("Q3 (ms)"),
            _("p95 (ms)"),
            _("max (ms)")
        ]
        if repo_format == "1":
            columns = [
                _("Timestamp"),
                _("Server"),
                _("Service"),
                _("Managed object"),
                _("TT ID"),
                _("Error code"),
                _("Error text")
            ]
        ts_from_date = time.mktime(from_date.timetuple())
        ts_to_date = time.mktime(to_date.timetuple())

        tt_systems = TTSystem.objects.filter().scalar("name")
        # Managed Object block

        q1 = """select server, service, count(), round(quantile(0.25)(duration), 0)/1000 as q1,
                                        round(quantile(0.5)(duration), 0)/1000 as q2,
                                        round(quantile(0.75)(duration), 0)/1000 as q3,
                                        round(quantile(0.95)(duration),0)/1000 as p95,
                                        round(max(duration),0)/1000 as max from span where %s
                                        group by server, service"""

        q2 = """select server, service, error_code, count(), avg(duration)
                from span where %s group by server, service, error_code"""

        q3 = """select ts, server, service, in_label, in_label, error_code, error_text from span
                where service IN ('create_massive_damage_outer', 'change_massive_damage_outer_close') and
                      error_code <> 0 and %s"""

        q_where = ["server IN ('%s')" % "', '".join(tt_systems)]
        # q_where = ["managed_object IN (%s)" % ", ".join(mo_bi_dict.keys())]
        q_where += [
            "(date >= toDate(%d)) AND (ts >= toDateTime(%d) AND ts <= toDateTime(%d))"
            % (ts_from_date, ts_from_date, ts_to_date)
        ]
        r = []
        ch = connection()
        if repo_format == "1":
            aa = {
                aa.escalation_tt.split(":")[-1]: aa
                for aa in ArchivedAlarm.objects.filter(
                    clear_timestamp__gte=from_date,
                    clear_timestamp__lte=to_date,
                    escalation_tt__exists=True)
            }
            query = q3 % " and ".join(q_where)
            for row in ch.execute(query):
                if row[2] in ["create_massive_damage_outer"]:
                    row[2] = u"Создание ТТ"
                    try:
                        row[3] = ManagedObject.objects.get(
                            tt_system_id=int(row[3]))
                        row[4] = ""
                    except ManagedObject.DoesNotExist:
                        pass
                    except ManagedObject.MultipleObjectsReturned:
                        row[3] = ManagedObject.objects.get(
                            tt_system_id=int(row[3]), is_managed=True
                        )
                        row[4] = ""
                elif row[2] in ["change_massive_damage_outer_close"]:
                    row[2] = u"Закрытие ТТ"
                    row[4] = row[3]
                    row[3] = aa[row[3]].managed_object if row[3] in aa else row[3]
                else:
                    continue
                r += [row]
        else:
            query = q1 % " and ".join(q_where)
            # (server, service)
            tt_s = {}
            for row in ch.execute(query):
                tt_s[(row[0], row[1])] = [row[2]] + [0, 0, 0] + row[3:]
            query = q2 % " and ".join(q_where)
            for row in ch.execute(query):
                if row[2] == "0":
                    tt_s[(row[0], row[1])][1] = row[3]
                else:
                    tt_s[(row[0], row[1])][2] += int(row[3])

            r += [
                SectionRow(name="Report from %s to %s" %
                           (from_date.strftime("%d.%m.%Y %H:%M"),
                            to_date.strftime("%d.%m.%Y %H:%M")))
            ]
            for line in sorted(tt_s, key=lambda x: x[0]):
                data = list(line)
                data += tt_s[line]
                data[5] = round((float(data[3]) / float(data[2])) * 100.0, 2)
                r += [data]

        return self.from_dataset(title=self.title,
                                 columns=columns,
                                 data=r,
                                 enumerate=True)
Example #10
 def handler(self):
     self.logger.info("Checking %s topology", self.name)
     # Get segment hierarchy
     segments = set(self.object.get_nested_ids())
     # Get managed objects and id <-> bi_id mappings
     bi_map = {}  # bi_id -> mo
     for mo in ManagedObject.objects.filter(
             segment__in=[str(x) for x in segments]
     ):
         bi_map[str(mo.bi_id)] = mo
     if not bi_map:
         self.logger.info("Empty segment tree. Skipping")
         return
     # Fetch latest MAC tables snapshots from ClickHouse
     # @todo: Apply vlan restrictions
     t0 = datetime.datetime.now() - datetime.timedelta(seconds=self.MAC_WINDOW)
     t0 = t0.replace(microsecond=0)
     SQL = """SELECT managed_object, mac, argMax(ts, ts), argMax(interface, ts)
     FROM mac
     WHERE
       date >= toDate('%s')
       AND ts >= toDateTime('%s')
       AND managed_object IN (%s)
     GROUP BY ts, managed_object, mac
     """ % (t0.date().isoformat(), t0.isoformat(sep=" "),
            ", ".join(bi_map))
     ch = connection()
     # Fill FIB
     mtable = []  # mo_id, mac, iface, ts
     last_ts = {}  # mo -> ts
     for mo_bi_id, mac, ts, iface in ch.execute(post=SQL):
         mo = bi_map.get(mo_bi_id)
         if mo:
             mtable += [[mo, MAC(int(mac)), iface, ts]]
             last_ts[mo] = max(ts, last_ts.get(mo, ts))
     # Filter out aged MACs
     mtable = [m for m in mtable if m[3] == last_ts[m[0]]]
     # Resolve objects
     macs = set(x[1] for x in mtable)
     if not macs:
         self.logger.info("No MAC addresses collected. Stopping")
         return
     object_macs = DiscoveryID.find_objects(macs)
     if not object_macs:
         self.logger.info("Cannot resolve any MAC addresses. Stopping")
         return
     # Build FIB
     fib = {}  # object -> interface -> {seen objects}
     for mo, mac, iface, ts in mtable:
         ro = object_macs.get(mac)
         if not ro:
             continue
         if mo not in fib:
             fib[mo] = {}
         if iface in fib[mo]:
             fib[mo][iface].add(ro)
         else:
             fib[mo][iface] = {ro}
     # Find uplinks and coverage
     coverage = {}  # mo -> covered objects
     uplinks = {}  # mo -> uplink interface
     up_fib = {}  # mo -> {seen via uplinks}
     for mo in fib:
         coverage[mo] = {mo}
         for iface in fib[mo]:
             if self.is_uplink(mo, fib[mo][iface], segments):
                 uplinks[mo] = iface
                 up_fib[mo] = fib[mo][iface]
             else:
                 coverage[mo] |= fib[mo][iface]
         if mo not in uplinks:
             self.logger.info(
                 "[%s] Cannot detect uplinks. Topology may be imprecise",
                 mo.name
             )
     # Dump FIB
     if self.logger.isEnabledFor(logging.DEBUG):
         for mo in fib:
             self.logger.debug("%s:", mo.name)
             if mo in uplinks:
                 self.logger.debug("  * %s: %s", uplinks[mo], ", ".join(x.name for x in up_fib[mo]))
             else:
                 self.logger.debug("    Warning: No uplinks. Topology may be imprecise")
             for iface in fib[mo]:
                 self.logger.debug("    %s: %s", iface, ", ".join(x.name for x in fib[mo][iface]))
     # Build topology
     for mo in fib:
         for iface in fib[mo]:
             if iface == uplinks.get(mo):
                 continue
             for ro in fib[mo][iface]:
                 cvr = coverage.get(ro)
                 if not cvr:
                     cvr = {ro}
                     coverage[ro] = cvr
                 if not fib[mo][iface] - cvr:
                     # All objects from mo:iface are seen via ro
                     uplink = uplinks.get(ro)
                     if uplink:
                         self.confirm_link(mo, iface, ro, uplink)
                         break
                     else:
                         self.logger.info(
                             "[%s] No uplinks. Cannot link to %s:%s. Topology may be imprecise",
                             ro.name, mo.name, iface
                         )
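
The uplink confirmation in the loop above relies on set containment: a link mo:iface -> ro is confirmed only when every object seen behind the interface is already covered by ro. A tiny illustration of the `not fib[mo][iface] - cvr` test with made-up names:

seen_via_iface = {"sw1", "sw2"}          # objects seen behind mo:iface
coverage_of_ro = {"sw1", "sw2", "sw3"}   # objects covered by candidate ro
# An empty set difference means everything behind the interface is covered by ro
assert not (seen_via_iface - coverage_of_ro)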
Example #11
    def get_metrics(mos):
        from_date = datetime.datetime.now() - datetime.timedelta(days=1)
        from_date = from_date.replace(microsecond=0)
        # mo = self.object
        bi_map = {str(mo.bi_id): mo for mo in mos}
        SQL = """SELECT managed_object, arrayStringConcat(path) as iface, argMax(ts, ts), argMax(load_in, ts), argMax(load_out, ts), argMax(errors_in, ts), argMax(errors_out, ts)
                FROM interface
                WHERE
                  date >= toDate('%s')
                  AND ts >= toDateTime('%s')
                  AND managed_object IN (%s)
                GROUP BY managed_object, iface
                """ % (from_date.date().isoformat(),
                       from_date.isoformat(sep=" "), ", ".join(bi_map))
        ch = connection()
        mtable = []  # mo, iface, ts, load_in, load_out
        last_ts = {}  # mo -> ts
        metric_map = {
            mo: {
                "interface": defaultdict(dict),
                "object": defaultdict(dict)
            }
            for mo in mos
        }
        msd = {ms.id: ms.table_name for ms in MetricScope.objects.filter()}
        mts = {
            str(mt.id): (msd[mt.scope.id], mt.field_name, mt.name)
            for mt in MetricType.objects.all()
        }
        # Interface Metrics
        for mo_bi_id, iface, ts, load_in, load_out, errors_in, errors_out in ch.execute(
                post=SQL):
            mo = bi_map.get(mo_bi_id)
            if mo:
                mtable += [[mo, iface, ts, load_in, load_out]]
                metric_map[mo]["interface"][iface] = {
                    "load_in": int(load_in),
                    "load_out": int(load_out),
                    "errors_in": int(errors_in),
                    "errors_out": int(errors_out)
                }
                last_ts[mo] = max(ts, last_ts.get(mo, ts))

        # Object Metrics
        # object_profiles = set(mos.values_list("object_profile", flat=True))
        object_profiles = set(mo.object_profile.id for mo in mos)
        mmm = set()
        op_fields_map = defaultdict(list)
        for op in ManagedObjectProfile.objects.filter(id__in=object_profiles):
            for mt in op.metrics:
                mmm.add(mts[mt["metric_type"]])
                op_fields_map[op.id] += [mts[mt["metric_type"]][1]]

        for table, fields in itertools.groupby(sorted(mmm, key=lambda x: x[0]),
                                               key=lambda x: x[0]):
            # tb_fields = [f[1] for f in fields]
            # mt_name = [f[2] for f in fields]
            fields = list(fields)
            SQL = """SELECT managed_object, argMax(ts, ts), %s
                  FROM %s
                  WHERE
                    date >= toDate('%s')
                    AND ts >= toDateTime('%s')
                    AND managed_object IN (%s)
                  GROUP BY managed_object
                  """ % (", ".join(
                ["argMax(%s, ts) as %s" % (f[1], f[1])
                 for f in fields]), table, from_date.date().isoformat(),
                         from_date.isoformat(sep=" "), ", ".join(bi_map))
            # print SQL
            for result in ch.execute(post=SQL):
                mo_bi_id, ts = result[:2]
                mo = bi_map.get(mo_bi_id)
                i = 0
                for r in result[2:]:
                    f_name = fields[i][2]
                    mtable += [[mo, ts, r]]
                    metric_map[mo]["object"][f_name] = r
                    last_ts[mo] = max(ts, last_ts.get(mo, ts))
                    i += 1
        return metric_map
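
For reference, the metric_map built above has this shape (keys and values are illustrative only):

# {
#     <ManagedObject>: {
#         "interface": {
#             "Gi 0/1": {"load_in": 0, "load_out": 0, "errors_in": 0, "errors_out": 0},
#         },
#         "object": {"<MetricType name>": <last value>},
#     },
# }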
Example #12
CH_USER = config.clickhouse.rw_user
# CH_USER = "******"
CH_PASSWORD = config.clickhouse.rw_password
# End date for old data
# END_DATE = datetime.datetime(2021, 4, 4)
END_DATE = None
# For speedup: if a replicated cluster is used, data will be queried between replicas
# Example: [("10.10.10.1", "10.10.10.2")] - (replica1, replica2)
CH_REPLICAS = []
#
# Split the query by month
# If a single query is needed, set MIGRATE_CHUNK greater than MIGRATE_DEPTH
# (see the interval sketch after this example)
MIGRATE_DEPTH = 120
MIGRATE_CHUNK = 30

client = connection()


def fix():
    if CH_REPLICAS:
        # For replicated schema
        for rep1, rep2 in CH_REPLICAS:
            rep1_migrate = []
            rep2_migrate = []
            for ms in MetricScope.objects.filter():
                for start, stop in iter_time_interval():
                    query = get_insert_query(ms, start, stop, remote=rep2)
                    if not query:
                        continue
                    # print(f'clickhouse-client -h {rep1} --query="{query}"')
                    rep1_migrate += [f'clickhouse-client -h {rep1} --query="{query}"']
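
The iter_time_interval generator used above is not shown in this snippet; a hypothetical stand-in illustrating how MIGRATE_DEPTH and MIGRATE_CHUNK could be turned into (start, stop) windows:

import datetime

def iter_chunk_intervals(depth=120, chunk=30):
    # depth/chunk correspond to MIGRATE_DEPTH/MIGRATE_CHUNK above;
    # walk from `depth` days ago to now in `chunk`-day windows
    now = datetime.datetime.now().replace(microsecond=0)
    start = now - datetime.timedelta(days=depth)
    while start < now:
        stop = min(start + datetime.timedelta(days=chunk), now)
        yield start, stop
        start = stop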
Example #13
    def ensure_table(self, connect=None):
        """
        Ensure table is exists
        :return: True, if table has been changed
        """
        from noc.core.clickhouse.connect import connection

        def ensure_column(table_name, column):
            """
            Return True if the column exists on the table.
            The legacy schema is detected by the presence of its `path` column.
            :param table_name:
            :param column:
            :return:
            """
            return bool(
                ch.execute(
                    """
                SELECT 1
                FROM system.columns
                WHERE
                  database=%s
                  AND table=%s
                  AND name=%s
                """,
                    [config.clickhouse.db, table_name, column],
                ))

        def ensure_columns(table_name):
            c = False
            # Alter when necessary
            existing = {}
            for name, type in ch.execute(
                    """
                SELECT name, type
                FROM system.columns
                WHERE
                  database=%s
                  AND table=%s
                """,
                [config.clickhouse.db, table_name],
            ):
                existing[name] = type
            after = None
            for f, t, me, de in self.iter_fields():
                if f not in existing:
                    ch.execute(
                        post=
                        f"ALTER TABLE {table_name} ADD COLUMN {f} {t} {me} {de} AFTER {after}"
                    )
                    c = True
                after = f
                if f in existing and existing[f] != t:
                    print(
                        f"Warning! Type mismatch for column {f}: {existing[f]} <> {t}"
                    )
                    print(
                        f"Set command manually: ALTER TABLE {table_name} MODIFY COLUMN {f} {t}"
                    )
            return c

        changed = False
        ch = connect or connection(read_only=False)
        is_cluster = bool(config.clickhouse.cluster)
        table = self._get_db_table()
        raw_table = self._get_raw_db_table()
        dist_table = self._get_distributed_db_table()
        # Legacy migration
        if ch.has_table(table) and not ch.has_table(raw_table):
            # Legacy schema: data for non-clustered installations was written
            # to the table itself. Move it to raw_*
            ch.rename_table(table, raw_table)
            changed = True
        # Old schema
        if ensure_column(raw_table, "path"):
            # Old schema; move the data table to the old_noc db to preserve the data.
            ch.ensure_db(OLD_PM_SCHEMA_TABLE)
            ch.rename_table(raw_table, f"{OLD_PM_SCHEMA_TABLE}.{raw_table}")
        # Ensure raw_* table
        if ch.has_table(raw_table):
            # raw_* table exists, check columns
            changed |= ensure_columns(raw_table)
        else:
            # Create new table
            ch.execute(post=self.get_create_sql())
            changed = True
        # For cluster mode check d_* distributed table
        if is_cluster:
            if ch.has_table(dist_table):
                changed |= ensure_columns(dist_table)
            else:
                ch.execute(post=self.get_create_distributed_sql())
                changed = True
        # Synchronize view
        ch.execute(post=self.get_create_view_sql())
        return changed
Example #14
 def handle_vacuum_bulling(self, ids, *args, **kwargs):
     connect()
     for mo_id in ids:
         mo = ManagedObject.get_by_id(mo_id)
         if not mo:
             self.print("@@@ %s is not found, skipping", mo_id)
             continue
         self.print("@@@ %s (%s, %s)", mo.name, mo.address, mo.id)
         # Get interfaces suitable for bulling
         bulling_ifaces: Set[Interface] = {
             iface
             for iface in Interface.objects.filter(managed_object=mo.id)
             if not iface.profile.allow_vacuum_bulling
         }
         if not bulling_ifaces:
             self.print("No interfaces suitable for vacuum bulling")
             continue
         # Get MAC addresses for bulling
         t0 = datetime.datetime.now() - datetime.timedelta(
             seconds=self.MAC_WINDOW)
         t0 = t0.replace(microsecond=0)
         sql = self.GET_MACS_SQL % (
             mo.bi_id,
             ", ".join("'%s'" % iface.name.replace("'", "''")
                       for iface in bulling_ifaces),
             t0.date().isoformat(),
             t0.isoformat(sep=" "),
         )
         ch = connection()
         last_ts: Optional[str] = None
         all_macs: List[str] = []
         mac_iface: Dict[str, str] = {}
         for ts, iface, mac in ch.execute(post=sql):
             if last_ts is None:
                 last_ts = ts
             elif last_ts > ts:
                 continue
             m = str(MAC(int(mac)))
             all_macs += [m]
             mac_iface[m] = iface
         # Resolve MACs to known chassis-id
         mac_map = DiscoveryID.find_objects(all_macs)
         # Filter suitable rivals
         seg_ifaces: DefaultDict[NetworkSegment, Set[str]] = defaultdict(set)
         iface_segs: DefaultDict[str, Set[NetworkSegment]] = defaultdict(set)
         for mac, r_mo in mac_map.items():
             iface = mac_iface.get(mac)
             if not iface:
                 continue
             seg_ifaces[r_mo.segment].add(iface)
             iface_segs[iface].add(r_mo.segment)
         rej_ifaces: Set[str] = set()
         for seg in seg_ifaces:
             if len(seg_ifaces[seg]) > 1 or seg.profile.is_persistent or seg == mo.segment:
                 # Seen on multiple interfaces or persistent segment or same segment
                 rej_ifaces |= set(seg_ifaces[seg])
                 continue
         for iface in sorted(iface_segs, key=alnum_key):
             if iface in rej_ifaces:
                 continue
             for seg in iface_segs[iface]:
                 self.print("  '%s' challenging '%s' on %s" %
                            (mo.segment.name, seg.name, iface))
                 BioSegTrial.schedule_trial(seg, mo.segment)
Example #15
    def api_report(
        self,
        request,
        reporttype=None,
        from_date=None,
        to_date=None,
        object_profile=None,
        filter_default=None,
        exclude_zero=None,
        interface_profile=None,
        selector=None,
        administrative_domain=None,
        columns=None,
        o_format=None,
        enable_autowidth=False,
        exclude_serial_change=False,
        **kwargs,
    ):
        def translate_row(row, cmap):
            return [row[i] for i in cmap]

        cols = [
            "object_name",
            "object_address",
            "object_adm_domain",
            "event_type",
            "sn_changed",
            "vendor_mac",
            "mac",
            "migrate_ts",
            "from_iface_name",
            "from_iface_down",
            "to_iface_name",
            "to_iface_down",
        ]

        header_row = [
            "OBJECT_NAME",
            "OBJECT_ADDRESS",
            "OBJECT_ADM_DOMAIN",
            "EVENT_TYPE",
            "SN_CHANGED",
            "VENDOR_MAC",
            "MAC",
            "MIGRATE_TS",
            "FROM_IFACE_NAME",
            "FROM_IFACE_DOWN",
            "TO_IFACE_NAME",
            "TO_IFACE_DOWN",
        ]
        if columns:
            cmap = []
            for c in columns.split(","):
                try:
                    cmap += [cols.index(c)]
                except ValueError:
                    continue
        else:
            cmap = list(range(len(cols)))
        r = [translate_row(header_row, cmap)]

        # Date Time Block
        if not from_date:
            from_date = datetime.datetime.now() - datetime.timedelta(days=1)
        else:
            from_date = datetime.datetime.strptime(from_date, "%d.%m.%Y")
        if not to_date or from_date == to_date:
            to_date = from_date + datetime.timedelta(days=1)
        else:
            to_date = datetime.datetime.strptime(to_date, "%d.%m.%Y") + datetime.timedelta(days=1)
        # interval = (to_date - from_date).days
        # ts_from_date = time.mktime(from_date.timetuple())
        # ts_to_date = time.mktime(to_date.timetuple())

        mos = self.get_report_object(
            user=request.user, adm=administrative_domain, selector=selector
        )
        mos_id = set(mos.order_by("bi_id").values_list("bi_id", flat=True))
        if interface_profile:
            interface_profile = InterfaceProfile.objects.get(id=interface_profile)
            iface_filter = (
                "dictGetString('interfaceattributes', 'profile', (managed_object, interface)) == '%s'"
                % interface_profile.name
            )
        else:
            iface_filter = "is_uni = 1"
        serials_changed = {}
        ch = connection()
        for row in ch.execute(
            DEVICE_MOVED_QUERY
            % (
                from_date.date().isoformat(),
                (to_date.date() - datetime.timedelta(days=1)).isoformat(),
            )
        ):
            serials_changed[int(row[0])] = row[1]
        for (
            mo,
            mac,
            mo_name,
            mo_address,
            mo_adm_domain,
            ifaces,
            migrate_ifaces,
            migrate_count,
        ) in ch.execute(
            MAC_MOVED_QUERY
            % (iface_filter, from_date.date().isoformat(), to_date.date().isoformat())
        ):
            if int(mo) not in mos_id:
                continue
            if exclude_serial_change and int(mo) in serials_changed:
                continue
            iface_from, iface_to, migrate = get_interface(ifaces)
            event_type = _("Migrate")
            if (
                rx_port_num.search(iface_from).group() == rx_port_num.search(iface_to).group()
                and iface_from != iface_to
            ):
                event_type = _("Migrate (Device Changed)")
            r += [
                translate_row(
                    [
                        mo_name,
                        mo_address,
                        mo_adm_domain,
                        event_type,
                        _("Yes") if int(mo) in serials_changed else _("No"),
                        MACVendor.get_vendor(mac),
                        mac,
                        datetime.datetime.fromtimestamp(migrate[1]).isoformat(sep=" "),  # TS
                        iface_from,
                        "--",
                        iface_to,
                        "--",
                    ],
                    cmap,
                )
            ]

        filename = "macs_move_report_%s" % datetime.datetime.now().strftime("%Y%m%d")
        if o_format == "csv":
            response = HttpResponse(content_type="text/csv")
            response["Content-Disposition"] = 'attachment; filename="%s.csv"' % filename
            writer = csv.writer(response, dialect="excel", delimiter=",", quoting=csv.QUOTE_MINIMAL)
            writer.writerows(r)
            return response
        elif o_format == "csv_zip":
            response = BytesIO()
            f = TextIOWrapper(TemporaryFile(mode="w+b"), encoding="utf-8")
            writer = csv.writer(f, dialect="excel", delimiter=";", quotechar='"')
            writer.writerows(r)
            f.seek(0)
            with ZipFile(response, "w", compression=ZIP_DEFLATED) as zf:
                zf.writestr("%s.csv" % filename, f.read())
                zf.filename = "%s.csv.zip" % filename
            # response = HttpResponse(content_type="text/csv")
            response.seek(0)
            response = HttpResponse(response.getvalue(), content_type="application/zip")
            response["Content-Disposition"] = 'attachment; filename="%s.csv.zip"' % filename
            return response
        elif o_format == "xlsx":
            response = BytesIO()
            wb = xlsxwriter.Workbook(response)
            cf1 = wb.add_format({"bottom": 1, "left": 1, "right": 1, "top": 1})
            ws = wb.add_worksheet("Alarms")
            max_column_data_length = {}
            for rn, x in enumerate(r):
                for cn, c in enumerate(x):
                    if rn and (
                        r[0][cn] not in max_column_data_length
                        or len(str(c)) > max_column_data_length[r[0][cn]]
                    ):
                        max_column_data_length[r[0][cn]] = len(str(c))
                    ws.write(rn, cn, c, cf1)
            ws.autofilter(0, 0, rn, cn)
            ws.freeze_panes(1, 0)
            for cn, c in enumerate(r[0]):
                # Set column width
                width = get_column_width(c)
                if enable_autowidth and width < max_column_data_length[c]:
                    width = max_column_data_length[c]
                ws.set_column(cn, cn, width=width)
            wb.close()
            response.seek(0)
            response = HttpResponse(response.getvalue(), content_type="application/vnd.ms-excel")
            response["Content-Disposition"] = 'attachment; filename="%s.xlsx"' % filename
            response.close()
            return response