Example #1
def json_get_running_times():
    now = util.utcnow()  # everything is implicitly UTC

    running_times = current_app.db.HostDoc.find({"status": "RUNNING"}, {
        "_id": False,
        "last_change": True,
        "stage": True
    })

    running_times = list(running_times)

    df = DataFrame(running_times)
    df["age"] = ((now - df["last_change"]) / np.timedelta64(1, "s")).astype(
        int)  # timedelta to seconds
    df["tally"] = 1
    df = df.set_index(["stage", "age"])
    g = df.groupby(level=["stage", "age"])
    df2 = g.sum()
    df2 = df2.fillna(0)

    results = OrderedDict()  # the order matters to c3js
    results["x"] = list(df2.loc["NETSCAN1"].index)
    # TODO stages not guaranteed to be in index
    results["NETSCAN1"] = list(df2.loc["NETSCAN1"]["tally"].astype(int))
    results["NETSCAN2"] = list(df2.loc["NETSCAN2"]["tally"].astype(int))
    results["PORTSCAN"] = list(df2.loc["PORTSCAN"]["tally"].astype(int))
    results["VULNSCAN"] = list(df2.loc["VULNSCAN"]["tally"].astype(int))

    return json.dumps(results, default=util.custom_json_handler)
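
All of these examples lean on util.utcnow(). A minimal sketch of what it presumably returns, inferred from the comments here ("everything is implicitly UTC") and the tz.tzutc() calls in later examples; the actual cyhy.util implementation may differ in detail:

import datetime
from dateutil import tz

def utcnow():
    # current time as a timezone-aware UTC datetime
    return datetime.datetime.now(tz.tzutc())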
Example #2
 def __check_database(self):
     now = util.utcnow()
     tickets = self.__db.tickets.find(
         {
             "source": "nessus",
             "last_change": {
                 "$gt": self.__since
             }
         },
         {
             "_id": 1,
             "owner": 1,
             "details": 1,
             "events.action": 1,
             "events": {
                 "$slice": -1
             },
         },
     ).sort([("last_change", -1)])
     tickets = list(tickets)
     for x in tickets:  # convert _ids to string since we can't set a custom json handler
         x["_id"] = str(x["_id"])
     self.__since = now
     return tickets
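
The {"events": {"$slice": -1}} projection trims each ticket's events array to its most recent entry, so a returned document is presumably shaped roughly like this (field values invented for illustration):

# {
#     "_id": "5f2b...",            # converted to str by the loop above
#     "owner": "SOME_ORG",
#     "details": {...},
#     "events": [{"action": ...}]  # only the last event survives the $slice
# }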
Example #3
    def close_tickets(self, closing_time=None):
        if closing_time is None:
            closing_time = util.utcnow()

        not_up_ips = self.__ips - self.__seen_ips

        ip_ints = [int(i) for i in not_up_ips]

        # find tickets with ips that were not up and are open
        tickets = self.__db.TicketDoc.find({"ip_int": {"$in": ip_ints}, "open": True})

        for ticket in tickets:
            # don't close tickets that are false_positives, just add event
            reason = "host down"
            self.__check_false_positive_expiration(
                ticket, closing_time.replace(tzinfo=tz.tzutc())
            )  # explicitly set to UTC (see CYHY-286)
            if ticket["false_positive"] is True:
                event = {
                    "time": closing_time,
                    "action": TICKET_EVENT.UNVERIFIED,
                    "reason": reason,
                    "reference": None,
                }
            else:
                ticket["open"] = False
                ticket["time_closed"] = closing_time
                event = {
                    "time": closing_time,
                    "action": TICKET_EVENT.CLOSED,
                    "reason": reason,
                    "reference": None,
                }
            ticket["events"].append(event)
            ticket.save()
Example #4
 def __init__(self, db, emitter, logger, sleep_time=3000, history_size=100):
     self.__db = db
     self.__emitter = emitter
     self.__logger = logger
     self.__sleep_time = sleep_time
     self.__since = util.utcnow() - datetime.timedelta(minutes=30)
     self.__history = deque(maxlen=history_size)
     schedule.every(sleep_time).seconds.do(self.__work)
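
The schedule library only fires jobs from a polling loop, so presumably a daemon loop along these lines (not shown in the example) drives __work():

import time
import schedule

while True:
    schedule.run_pending()
    time.sleep(1)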
Example #5
def get_cybex_dataframe(db, start_date, ticket_severity):
    now = util.utcnow()
    tomorrow = now + datetime.timedelta(days=1)
    days_to_graph = pd.to_datetime(pd.date_range(start_date, now), utc=True)

    fed_executive_owners = db.RequestDoc.get_all_descendants("EXECUTIVE")

    # Calculate Buckets
    tix = db.TicketDoc.find(
        {
            "source": "nessus",
            "details.severity": ticket_severity,
            "false_positive": False,
            "owner": {
                "$in": fed_executive_owners
            },
            "$or": [{
                "time_closed": {
                    "$gte": start_date
                }
            }, {
                "time_closed": None
            }],
        },
        {
            "_id": False,
            "time_opened": True,
            "time_closed": True
        },
    )

    tix = list(tix)
    df = DataFrame(tix)
    results_df = DataFrame(index=days_to_graph,
                           columns=["young", "old", "total"])
    if not df.empty:
        df.time_closed = df.time_closed.fillna(
            tomorrow
        )  # for accounting purposes, say all open tix will close tomorrow
        # convert times to datetime64
        df.time_closed = pd.to_datetime(df.time_closed, utc=True)
        df.time_opened = pd.to_datetime(df.time_opened, utc=True)

        old_delta = np.timedelta64(TICKETS_CLOSED_PAST_DAYS, "D")

        for start_of_day, values in results_df.iterrows():
            end_of_day = start_of_day + np.timedelta64(
                1, "D") - np.timedelta64(1, "ns")
            open_on_day_mask = (df.time_opened <=
                                end_of_day) & (df.time_closed > start_of_day)
            age_on_date = start_of_day - df.time_opened
            age_on_date_masked = age_on_date.mask(~open_on_day_mask)
            values["total"] = open_on_day_mask.value_counts().get(True, 0)
            values["young"] = ((age_on_date_masked <
                                old_delta).value_counts().get(True, 0))
            values["old"] = ((age_on_date_masked >=
                              old_delta).value_counts().get(True, 0))
    return results_df
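
A self-contained toy run of the masking pattern above, with made-up tickets and a stand-in value for TICKETS_CLOSED_PAST_DAYS:

import numpy as np
import pandas as pd

df = pd.DataFrame({
    "time_opened": pd.to_datetime(["2020-01-01", "2020-01-05"], utc=True),
    "time_closed": pd.to_datetime(["2020-01-10", "2020-02-01"], utc=True),
})
old_delta = np.timedelta64(30, "D")  # stand-in for TICKETS_CLOSED_PAST_DAYS

start_of_day = pd.Timestamp("2020-01-06", tz="UTC")
end_of_day = start_of_day + np.timedelta64(1, "D") - np.timedelta64(1, "ns")
open_on_day_mask = (df.time_opened <= end_of_day) & (df.time_closed > start_of_day)
age_on_date = (start_of_day - df.time_opened).mask(~open_on_day_mask)
print(open_on_day_mask.value_counts().get(True, 0))  # -> 2 tickets open that day
print((age_on_date < old_delta).value_counts().get(True, 0))  # -> both still "young"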
Example #6
 def __check_database(self):
     now = util.utcnow()
     tickets = self.__db.new_hire.find({"latest": True})
     tickets = list(tickets)
     for x in tickets:  # convert _ids to string since we can't set a custom json handler
         x["_id"] = str(x["_id"])
     self.__since = now
     return tickets
Example #7
    def close_tickets(self):
        if self.__closing_time is None:
            # You don't have to go home but you can't stay here
            self.__closing_time = util.utcnow()
        ip_ints = [int(i) for i in self.__ips]

        # find tickets that are covered by this scan, but weren't just touched
        # TODO: this is the way I wanted to do it, but it blows up mongo
        # tickets = self.__db.TicketDoc.find({'ip_int':{'$in':ip_ints},
        #                                     'port':{'$in':self.__ports},
        #                                     'source_id':{'$in':self.__source_ids},
        #                                     '_id':{'$nin':list(self.__seen_ticket_ids)},
        #                                     'source':self.__source,
        #                                     'open':True})

        # work-around using a pipeline
        tickets = database.run_pipeline_cursor(
            close_tickets_pl(
                ip_ints,
                list(self.__ports),
                list(self.__source_ids),
                list(self.__seen_ticket_ids),
                self.__source,
            ),
            self.__db,
        )

        for raw_ticket in tickets:
            ticket = self.__db.TicketDoc(raw_ticket)  # make it managed
            # don't close tickets that are false_positives, just add event
            reason = "vulnerability not detected"
            self.__check_false_positive_expiration(
                ticket, self.__closing_time.replace(tzinfo=tz.tzutc())
            )  # explicitly set to UTC (see CYHY-286)
            if ticket["false_positive"] is True:
                event = {
                    "time": self.__closing_time,
                    "action": TICKET_EVENT.UNVERIFIED,
                    "reason": reason,
                    "reference": None,
                }
            else:
                ticket["open"] = False
                ticket["time_closed"] = self.__closing_time
                event = {
                    "time": self.__closing_time,
                    "action": TICKET_EVENT.CLOSED,
                    "reason": reason,
                    "reference": None,
                }
            if self.__manual_scan:
                event["manual"] = True
            ticket["events"].append(event)
            ticket.save()
Example #8
    def transition_host(self,
                        ip,
                        up=None,
                        reason=None,
                        has_open_ports=None,
                        was_failure=False):
        """Attempts to move host from one state to another.
           returns (HostDoc, state_changed)
            - HostDoc: host that was transitioned
            - state_changed: True if the state changed, False otherwise."""
        host = self.__db.HostDoc.get_by_ip(ip)
        if host is None:
            self.__logger.warning(
                "Could not find %s in database during transition_host call" %
                ip)
            return (None, False)

        prev_stage = host["stage"]
        prev_status = host["status"]
        host_transitioned, host_finished_stage = self.__state_manager.transition(
            host, up, has_open_ports, was_failure)

        # Calculating the state of a HostDoc is non-trivial.
        host.set_state(up, has_open_ports, reason)

        # If host finished a stage, update timestamp for latest_scan.<stage_that_just_finished>
        current_time = util.utcnow()
        if host_finished_stage:
            host["latest_scan"][prev_stage] = current_time

        if host["status"] == STATUS.DONE:
            host["latest_scan"][
                STATUS.
                DONE] = current_time  # Update timestamp for reaching DONE status
            # check to see if owner should use a scheduler
            request = self.__db.requests.find_one({"_id": host["owner"]},
                                                  {"scheduler": True})
            if request and request.get("scheduler") != None:
                self.__scheduler.schedule(host)

        # save all changes made to the host by the state manager and the scheduler
        host.save()

        if host_transitioned:
            new_stage = host["stage"]
            new_status = host["status"]
            owner = host["owner"]
            self.tally_update(owner, prev_stage, prev_status, new_stage,
                              new_status)

        return (host, host_transitioned)
Example #9
    def close_tickets(self, closing_time=None):
        if closing_time is None:
            closing_time = util.utcnow()
        ip_ints = [int(i) for i in self.__ips]

        all_ports_scanned = len(self.__ports) == MAX_PORTS_COUNT

        if all_ports_scanned:
            # If all the ports were scanned we have an opportunity to close port 0
            # tickets. This can only be done if no ports are open for an IP.
            # Otherwise they can be closed in the VULNSCAN stage.
            ips_with_no_open_ports = self.__ips - IPSet(self.__seen_ip_port.keys())
            ips_with_no_open_ports_ints = [int(i) for i in ips_with_no_open_ports]

            # Close all tickets regardless of protocol for ips_with_no_open_ports
            tickets_to_close = self.__db.TicketDoc.find(
                {"ip_int": {"$in": ips_with_no_open_ports_ints}, "open": True}
            )

            for ticket in tickets_to_close:
                self.__handle_ticket_port_closed(ticket, closing_time)

            # handle ips that had at least one port open
            # next query optimized for all_ports_scanned
            tickets = self.__db.TicketDoc.find(
                {
                    "ip_int": {"$in": ip_ints},
                    "port": {"$ne": 0},
                    "protocol": {"$in": list(self.__protocols)},
                    "open": True,
                }
            )
        else:
            # not all ports scanned
            tickets = self.__db.TicketDoc.find(
                {
                    "ip_int": {"$in": ip_ints},
                    "port": {"$in": list(self.__ports)},
                    "protocol": {"$in": list(self.__protocols)},
                    "open": True,
                }
            )

        for ticket in tickets:
            if ticket["port"] in self.__seen_ip_port[ticket["ip"]]:
                # this ticket's ip:port was open, so we skip closing it
                continue
            self.__handle_ticket_port_closed(ticket, closing_time)
Example #10
 def request_limits(self, when=None):
     # returns {owner: {stage:limit, stage:limit, ...}, ...}
     if when is None:
         when = util.utcnow()
     requests = self.__db.RequestDoc.find({"scan_types": SCAN_TYPE.CYHY})
     results = {}
     for request in requests:
         if request["period_start"] < when and time_calc.in_windows(
                 request["windows"], when):
             limits = copy.copy(self.TEMP_MAX_CONCURRENCY)
             for limit in request.get("scan_limits", []):
                 limits[limit["scanType"]] = limit["concurrent"]
         else:
             limits = self.TEMP_OFF_CONCURRENCY
         results[request["_id"]] = limits
     return results
Example #11
File: cybex.py Project: cisagov/ncats-webd
def csv_get_open_tickets(ticket_severity):
    if ticket_severity == CRITICAL_SEVERITY:
        severity_name = "_critical"
    elif ticket_severity == HIGH_SEVERITY:
        severity_name = "_high"
    else:
        severity_name = ""

    csv = ncats_webd.cybex_queries.csv_get_open_tickets(
        current_app.db, ticket_severity)
    response = Response(csv, mimetype="text/csv")
    response.headers["Content-Disposition"] = (
        "attachment; filename=cybex_open_tickets{!s}_{!s}.csv".format(
            severity_name, util.utcnow().strftime("%Y%m%d")
        )
    )
    return response
Example #12
    def schedule(self, host):
        super(DefaultScheduler, self).schedule(host)

        # determine the new priority for the host
        if host["state"]["up"] == False:
            self.__process_down_host(host)
        else:
            # host was up
            max_severity = self.__host_max_severity(host)
            if max_severity > 0:
                self.__process_vuln_host(host, max_severity)
            else:
                self.__process_vuln_free_host(host)

        # determine the next scan time based on the priority
        d = self.__timedelta_for_priority(host["priority"])
        host["next_scan"] = util.utcnow() + d
Example #13
File: bod.py Project: cisagov/ncats-webd
def get_bod_open_tickets_dataframe(bod_start_date):
    bod_owners = current_app.db.RequestDoc.get_all_descendants("EXECUTIVE")
    tix = current_app.db.TicketDoc.find(
        {
            "source": "nessus",
            "details.severity": 4,
            "false_positive": False,
            "owner": {"$in": bod_owners},
            "open": True,
        },
        {
            "_id": False,
            "owner": True,
            "time_opened": True,
            "ip": True,
            "port": True,
            "details.name": True,
            "details.cve": True,
        },
    )
    tix = list(tix)
    for x in tix:
        x.update(x["details"])
        del x["details"]
    df = DataFrame(tix)
    if not df.empty:
        bsd_utc = pd.to_datetime(
            bod_start_date
        )  # Store bod_start_date as pandas Timestamp
        df["time_opened"] = df.time_opened.dt.tz_convert(
            "UTC"
        )  # Mark df.time_opened as UTC
        df["bod_time_opened"] = df.time_opened.apply(
            lambda x, bsd_utc=bsd_utc: max(x, bsd_utc)
        )
        now = pd.to_datetime(
            util.utcnow()
        )  # Store current UTC time as pandas Timestamp
        df["bod_age_td"] = now - df.bod_time_opened
        df["bod_age"] = df["bod_age_td"].astype("timedelta64[D]")
        del df["bod_time_opened"]
        del df["bod_age_td"]
        df.sort_values(by="bod_age", ascending=False, inplace=True)
    return df
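
The astype("timedelta64[D]") step truncates each age to a whole number of (float-valued) days under the older pandas versions these examples were written against; a quick check with made-up values:

import pandas as pd

s = pd.Series(pd.to_timedelta(["36 hours", "12 hours"]))
print(s.astype("timedelta64[D]").tolist())  # -> [1.0, 0.0] on older pandas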
Example #14
def get_stats(db):
    orgs = db.RequestDoc.get_owner_types(as_lists=True,
                                         stakeholders_only=False,
                                         include_retired=True)
    orgs["SLTT"] = (orgs[AGENCY_TYPE.STATE] + orgs[AGENCY_TYPE.LOCAL] +
                    orgs[AGENCY_TYPE.TRIBAL] + orgs[AGENCY_TYPE.TERRITORIAL])
    now = util.utcnow()
    rd = util.report_dates(now)

    ticket = dict()
    ticket["open_non_FP_tix"], ticket["open_FP_tix"] = total_open(db, orgs)
    (
        ticket["FY_ticket_counts_by_year"],
        ticket["FY_ticket_count_totals"],
    ) = ticket_counts_by_fiscal_year(db, rd["fy_start"])
    ticket["currentFYactivity"] = return_activity(db, orgs,
                                                  "Current FY Activity",
                                                  rd["fy_start"], now)
    ticket["previousFYactivity"] = return_activity(db, orgs,
                                                   "Previous FY Activity",
                                                   rd["prev_fy_start"],
                                                   rd["prev_fy_end"])
    ticket["currentMonthActivity"] = return_activity(db, orgs,
                                                     "Current Month Activity",
                                                     rd["month_start"], now)
    ticket["previousMonthActivity"] = return_activity(
        db,
        orgs,
        "Previous Month Activity",
        rd["prev_month_start"],
        rd["prev_month_end"],
    )
    ticket["currentWeekActivity"] = return_activity(db, orgs,
                                                    "Current Week Activity",
                                                    rd["week_start"], now)
    ticket["previousWeekActivity"] = return_activity(db, orgs,
                                                     "Previous Week Activity",
                                                     rd["prev_week_start"],
                                                     rd["prev_week_end"])
    ticket["stakeholders_scanned"] = scanning_breakdown(db)
    ticket["all_time_tickets"] = all_time_opened_and_closed_breakdown(db)
    return rd, ticket
Example #15
def get_closed_tickets_dataframe(db, ticket_severity):
    closed_since_date = util.utcnow() - datetime.timedelta(
        days=TICKETS_CLOSED_PAST_DAYS)

    fed_executive_owners = db.RequestDoc.get_all_descendants("EXECUTIVE")
    tix = db.TicketDoc.find(
        {
            "source": "nessus",
            "time_closed": {
                "$gte": closed_since_date
            },
            "details.severity": ticket_severity,
            "owner": {
                "$in": fed_executive_owners
            },
            "open": False,
        },
        {
            "_id": False,
            "owner": True,
            "time_opened": True,
            "time_closed": True,
            "ip": True,
            "port": True,
            "details.name": True,
            "details.cve": True,
        },
    )

    tix = list(tix)
    for x in tix:
        x.update(x["details"])
        del x["details"]
    df = DataFrame(tix)
    if not df.empty:
        df["days_to_close"] = ((df.time_closed - df.time_opened).apply(
            lambda x: x.total_seconds() / 86400.0).round(1))
        df.sort_values(by="time_closed", ascending=True, inplace=True)
    return df
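
Assuming the same df as in the function above, the days_to_close line could be written a bit more idiomatically with the vectorized dt accessor, avoiding the per-element apply:

df["days_to_close"] = ((df.time_closed - df.time_opened).dt.total_seconds() / 86400.0).round(1)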
Example #16
    def check_host_next_scans(self):
        """Moves hosts to WAITING status based on their "next_scan" field.
        jump_starts previously "up" hosts, and ignores the owner init_stage (to be deprecated)
        returns the number of modified hosts"""
        now = util.utcnow()
        # move "up" hosts to STAGE.PORTSCAN, STATUS.WAITING
        up_hosts_cursor = self.__db.HostDoc.get_scheduled_hosts(
            True, now, self.__next_scan_limit)
        self.__logger.debug(
            'Updating previous "up" hosts that are now due to be scanned.')
        hosts_processed = self.__update_hosts_next_scans(
            up_hosts_cursor, STAGE.PORTSCAN, STATUS.WAITING)
        self.__logger.debug('Updated %d "up" hosts.' % hosts_processed)

        # move "down" hosts to STAGE.NETSCAN1, STATUS.WAITING
        down_hosts_cursor = self.__db.HostDoc.get_scheduled_hosts(
            False, now, self.__next_scan_limit)
        self.__logger.debug(
            'Updating previous "down" hosts that are now due to be scanned.')
        hosts_processed = self.__update_hosts_next_scans(
            down_hosts_cursor, STAGE.NETSCAN1, STATUS.WAITING)
        self.__logger.debug('Updated %d "down" hosts.' % hosts_processed)
Example #17
def in_windows(windows, time=None):
    if time is None:
        time = util.utcnow()

    for w in windows:
        parse_me = "%s %s" % (w["day"], w["start"])
        dt = parser.parse(parse_me)
        dow = dt.weekday()
        relative_weekday = dateutil.relativedelta.weekday(dow)
        duration = int(w["duration"])
        delta = relativedelta(
            weekday=relative_weekday(-1),
            hour=dt.hour,
            minute=dt.minute,
            second=dt.second,
            microsecond=dt.microsecond,
        )
        window_start = time + delta
        window_duration = relativedelta(hours=+duration)
        window_close = window_start + window_duration
        if time > window_start and time < window_close:
            return True
    return False
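
A hypothetical call; the "day", "start", and "duration" keys come from the loop body above, but the values here are invented:

windows = [{"day": "Saturday", "start": "22:00", "duration": "4"}]
# True only while `time` falls inside the most recent Saturday 22:00 + 4h window
print(in_windows(windows, time=util.utcnow()))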
Example #18
    def open_ticket(self, vuln, reason):
        if self.__closing_time is None or self.__closing_time < vuln["time"]:
            self.__closing_time = vuln["time"]

        # search for previous open ticket that matches
        prev_open_ticket = self.__db.TicketDoc.find_one(
            {
                "ip_int": long(vuln["ip"]),
                "port": vuln["port"],
                "protocol": vuln["protocol"],
                "source": vuln["source"],
                "source_id": vuln["plugin_id"],
                "open": True,
            }
        )
        if prev_open_ticket:
            self.__generate_ticket_details(vuln, prev_open_ticket)
            self.__check_false_positive_expiration(
                prev_open_ticket, vuln["time"].replace(tzinfo=tz.tzutc())
            )  # explicitly set to UTC (see CYHY-286)
            # add an entry to the existing open ticket
            event = {
                "time": vuln["time"],
                "action": TICKET_EVENT.VERIFIED,
                "reason": reason,
                "reference": vuln["_id"],
            }
            if self.__manual_scan:
                event["manual"] = True
            prev_open_ticket["events"].append(event)
            prev_open_ticket.save()
            self.__mark_seen(prev_open_ticket)
            return

        # no matching tickets are currently open
        # search for a previously closed ticket that was closed after the cutoff (within the reopen window)
        cutoff_date = util.utcnow() + self.__reopen_delta
        reopen_ticket = self.__db.TicketDoc.find_one(
            {
                "ip_int": long(vuln["ip"]),
                "port": vuln["port"],
                "protocol": vuln["protocol"],
                "source": vuln["source"],
                "source_id": vuln["plugin_id"],
                "open": False,
                "time_closed": {"$gt": cutoff_date},
            }
        )

        if reopen_ticket:
            self.__generate_ticket_details(vuln, reopen_ticket)
            event = {
                "time": vuln["time"],
                "action": TICKET_EVENT.REOPENED,
                "reason": reason,
                "reference": vuln["_id"],
            }
            if self.__manual_scan:
                event["manual"] = True
            reopen_ticket["events"].append(event)
            reopen_ticket["time_closed"] = None
            reopen_ticket["open"] = True
            reopen_ticket.save()
            self.__mark_seen(reopen_ticket)
            return

        # time to open a new ticket
        new_ticket = self.__db.TicketDoc()
        new_ticket.ip = vuln["ip"]
        new_ticket["port"] = vuln["port"]
        new_ticket["protocol"] = vuln["protocol"]
        new_ticket["source"] = vuln["source"]
        new_ticket["source_id"] = vuln["plugin_id"]
        new_ticket["owner"] = vuln["owner"]
        new_ticket["time_opened"] = vuln["time"]
        self.__generate_ticket_details(vuln, new_ticket, check_for_changes=False)

        host = self.__db.HostDoc.get_by_ip(vuln["ip"])
        if host is not None:
            new_ticket["loc"] = host["loc"]

        event = {
            "time": vuln["time"],
            "action": TICKET_EVENT.OPENED,
            "reason": reason,
            "reference": vuln["_id"],
        }
        if self.__manual_scan:
            event["manual"] = True
        new_ticket["events"].append(event)

        if (
            new_ticket["owner"] == UNKNOWN_OWNER
        ):  # close tickets with no owner immediately
            event = {
                "time": vuln["time"],
                "action": TICKET_EVENT.CLOSED,
                "reason": "No associated owner",
                "reference": None,
            }
            if self.__manual_scan:
                event["manual"] = True
            new_ticket["events"].append(event)
            new_ticket["open"] = False
            new_ticket["time_closed"] = self.__closing_time

        new_ticket.save()
        self.__mark_seen(new_ticket)

        # Create notifications for Highs (3) or Criticals (4)
        if new_ticket["details"]["severity"] > 2:
            self.__create_notification(new_ticket)
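
A sketch of the reopen-cutoff arithmetic above, assuming __reopen_delta is a negative timedelta (its actual value is configured elsewhere and not shown in these examples):

import datetime

reopen_delta = datetime.timedelta(days=-90)  # hypothetical value
cutoff_date = util.utcnow() + reopen_delta   # i.e., 90 days ago
# Tickets with time_closed > cutoff_date (closed within the last 90 days) are
# reopened; anything closed earlier gets a brand-new ticket instead.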
Example #19
File: bod.py Project: cisagov/ncats-webd
def get_bod_dataframe(bod_start_date):
    now = util.utcnow()
    tomorrow = now + datetime.timedelta(days=1)
    days_of_the_bod = pd.to_datetime(pd.date_range(bod_start_date, now), utc=True)

    bod_owners = current_app.db.RequestDoc.get_all_descendants("EXECUTIVE")

    backlog_tix = current_app.db.TicketDoc.find(
        {
            "source": "nessus",
            "details.severity": 4,
            "owner": {"$in": bod_owners},
            "time_opened": {"$lte": bod_start_date},
            "false_positive": False,
            "$or": [{"time_closed": {"$gte": bod_start_date}}, {"time_closed": None}],
        },
        {"_id": False, "time_closed": True},
    )

    results_df = DataFrame(
        index=days_of_the_bod, columns=["young", "mid", "old", "total"]
    )
    backlog_tix = list(backlog_tix)
    df = DataFrame(backlog_tix)
    if not df.empty:
        df["tally"] = 1
        df.time_closed = df.time_closed.fillna(
            tomorrow
        )  # assume they'll close tomorrow
        df.time_closed = df.time_closed.apply(
            pd.to_datetime
        )  # if fillna was a no-op, the column still has the wrong dtype, so coerce explicitly

        df_backlog = df.set_index("time_closed")
        df_backlog = df_backlog.resample("1D").sum()

        df_backlog = df_backlog.reindex(days_of_the_bod)  # does not include tomorrow
        df_backlog.tally = df_backlog.tally.fillna(0).astype(int)
        df_backlog["csum"] = df_backlog.tally.cumsum()
        df_backlog["remaining"] = len(backlog_tix) - df_backlog.csum
    else:
        df_backlog = DataFrame()
    # Calculate Buckets
    tix = current_app.db.TicketDoc.find(
        {
            "source": "nessus",
            "details.severity": 4,
            "false_positive": False,
            "owner": {"$in": bod_owners},
            "$or": [{"time_closed": {"$gte": bod_start_date}}, {"time_closed": None}],
        },
        {"_id": False, "time_opened": True, "time_closed": True},
    )
    df = DataFrame(list(tix))
    if not df.empty:
        # Convert times to pandas Timestamp
        df.time_opened = pd.to_datetime(df.time_opened, utc=True)
        # For tickets that haven't closed yet, also set time_closed to tomorrow
        # df.time_closed = pd.to_datetime(df.time_closed.fillna(tomorrow), utc=True).dt.tz_localize('UTC')
        df.time_closed = pd.to_datetime(
            df.time_closed.fillna(tomorrow), utc=True
        ).dt.tz_convert("UTC")
        bsd_utc = pd.to_datetime(
            bod_start_date
        )  # Store bod_start_date as pandas Timestamp
        df["bod_time_opened"] = df.time_opened.apply(
            lambda x, bsd_utc=bsd_utc: max(x, bsd_utc)
        )
        df["bod_age"] = df.time_closed - df.bod_time_opened

        mid_delta = np.timedelta64(BOD_CUTOFF_1, "D")
        old_delta = np.timedelta64(BOD_CUTOFF_2, "D")

        for start_of_day, values in results_df.iterrows():
            end_of_day = start_of_day + np.timedelta64(1, "D") - np.timedelta64(1, "ns")
            open_on_day_mask = (df.time_opened <= end_of_day) & (
                df.time_closed > start_of_day
            )
            bod_age_on_date = start_of_day - df.bod_time_opened
            bod_age_on_date_masked = bod_age_on_date.mask(~open_on_day_mask)
            values["total"] = open_on_day_mask.value_counts().get(True, 0)
            values["young"] = (
                (bod_age_on_date_masked < mid_delta).value_counts().get(True, 0)
            )
            values["mid"] = (
                (
                    (bod_age_on_date_masked >= mid_delta)
                    & (bod_age_on_date_masked < old_delta)
                )
                .value_counts()
                .get(True, 0)
            )
            values["old"] = (
                (bod_age_on_date_masked >= old_delta).value_counts().get(True, 0)
            )

        if not df_backlog.empty:
            # combine previous calculations
            results_df["backlog"] = df_backlog.remaining
    return results_df
Example #20
    def open_ticket(self, portscan, reason):
        if self.__closing_time is None or self.__closing_time < portscan["time"]:
            self.__closing_time = portscan["time"]

        # search for previous open ticket that matches
        prev_open_ticket = self.__db.TicketDoc.find_one(
            {
                "ip_int": portscan["ip_int"],
                "port": portscan["port"],
                "protocol": portscan["protocol"],
                "source": portscan["source"],
                "source_id": portscan["source_id"],
                "open": True,
            }
        )
        if prev_open_ticket:
            self.__check_false_positive_expiration(
                prev_open_ticket, portscan["time"].replace(tzinfo=tz.tzutc())
            )  # explicitly set to UTC (see CYHY-286)
            # add an entry to the existing open ticket
            event = {
                "time": portscan["time"],
                "action": TICKET_EVENT.VERIFIED,
                "reason": reason,
                "reference": portscan["_id"],
            }
            prev_open_ticket["events"].append(event)
            prev_open_ticket.save()
            return

        # no matching tickets are currently open
        # search for a previously closed ticket that was closed after the cutoff (within the reopen window)
        cutoff_date = util.utcnow() + self.__reopen_delta
        reopen_ticket = self.__db.TicketDoc.find_one(
            {
                "ip_int": portscan["ip_int"],
                "port": portscan["port"],
                "protocol": portscan["protocol"],
                "source": portscan["source"],
                "source_id": portscan["source_id"],
                "open": False,
                "time_closed": {"$gt": cutoff_date},
            }
        )

        if reopen_ticket:
            event = {
                "time": portscan["time"],
                "action": TICKET_EVENT.REOPENED,
                "reason": reason,
                "reference": portscan["_id"],
            }
            reopen_ticket["events"].append(event)
            reopen_ticket["time_closed"] = None
            reopen_ticket["open"] = True
            reopen_ticket.save()
            return

        # time to open a new ticket
        new_ticket = self.__db.TicketDoc()
        new_ticket.ip = portscan["ip"]
        new_ticket["port"] = portscan["port"]
        new_ticket["protocol"] = portscan["protocol"]
        new_ticket["source"] = portscan["source"]
        new_ticket["source_id"] = portscan["source_id"]
        new_ticket["owner"] = portscan["owner"]
        new_ticket["time_opened"] = portscan["time"]
        new_ticket["details"] = {
            "cve": None,
            "score_source": None,
            "cvss_base_score": None,
            "severity": 0,
            "name": portscan["name"],
            "service": portscan["service"],
        }

        host = self.__db.HostDoc.get_by_ip(portscan["ip"])
        if host is not None:
            new_ticket["loc"] = host["loc"]

        event = {
            "time": portscan["time"],
            "action": TICKET_EVENT.OPENED,
            "reason": reason,
            "reference": portscan["_id"],
        }
        new_ticket["events"].append(event)

        if (
            new_ticket["owner"] == UNKNOWN_OWNER
        ):  # close tickets with no owner immediately
            event = {
                "time": portscan["time"],
                "action": TICKET_EVENT.CLOSED,
                "reason": "No associated owner",
                "reference": None,
            }
            new_ticket["events"].append(event)
            new_ticket["open"] = False
            new_ticket["time_closed"] = self.__closing_time

        new_ticket.save()

        # Create a notification for this ticket
        self.__create_notification(new_ticket)
Example #21
def congressional_data(db, start_date, end_date):
    if start_date is None:
        return "Must input a start date. Received %s & %s" % (start_date,
                                                              end_date)
    if end_date is None:
        end_date = util.utcnow()

    # Needed when run as a standalone script:
    # start_date = parser.parse(start_date)
    # end_date = parser.parse(end_date)

    all_stakeholders = db.RequestDoc.get_owner_types(as_lists=True,
                                                     stakeholders_only=True)

    active_fed_stakeholders = db.ReportDoc.find({
        "owner": {
            "$in": all_stakeholders[AGENCY_TYPE.FEDERAL]
        },
        "generated_time": {
            "$gte": start_date,
            "$lt": end_date
        },
    }).distinct("owner")
    active_fed_stakeholders.sort()
    active_fed_owners = list()
    for org in active_fed_stakeholders:
        active_fed_owners.append(org)
        active_fed_owners += db.RequestDoc.get_all_descendants(org)

    fed_executive_stakeholders = db.RequestDoc.get_by_owner(
        "EXECUTIVE")["children"]
    active_fed_executive_stakeholders = db.ReportDoc.find({
        "owner": {
            "$in": fed_executive_stakeholders
        },
        "generated_time": {
            "$gte": start_date,
            "$lt": end_date
        },
    }).distinct("owner")
    active_fed_executive_stakeholders.sort()
    active_fed_executive_owners = list()
    for org in active_fed_executive_stakeholders:
        active_fed_executive_owners.append(org)
        active_fed_executive_owners += db.RequestDoc.get_all_descendants(org)

    fed_cfo_stakeholders = db.RequestDoc.get_by_owner(
        "FED_CFO_ACT")["children"]
    active_fed_cfo_stakeholders = db.ReportDoc.find({
        "owner": {
            "$in": fed_cfo_stakeholders
        },
        "generated_time": {
            "$gte": start_date,
            "$lt": end_date
        },
    }).distinct("owner")
    active_fed_cfo_stakeholders.sort()
    active_fed_cfo_owners = list()
    for org in active_fed_cfo_stakeholders:
        active_fed_cfo_owners.append(org)
        active_fed_cfo_owners += db.RequestDoc.get_all_descendants(org)

    fed_exec_non_cfo_stakeholders = list(
        set(fed_executive_stakeholders) - set(fed_cfo_stakeholders))
    active_fed_exec_non_cfo_stakeholders = db.ReportDoc.find({
        "owner": {
            "$in": fed_exec_non_cfo_stakeholders
        },
        "generated_time": {
            "$gte": start_date,
            "$lt": end_date
        },
    }).distinct("owner")
    active_fed_exec_non_cfo_stakeholders.sort()
    active_fed_exec_non_cfo_owners = list()
    for org in active_fed_exec_non_cfo_stakeholders:
        active_fed_exec_non_cfo_owners.append(org)
        active_fed_exec_non_cfo_owners += db.RequestDoc.get_all_descendants(
            org)

    SLTT_stakeholders = (all_stakeholders[AGENCY_TYPE.STATE] +
                         all_stakeholders[AGENCY_TYPE.LOCAL] +
                         all_stakeholders[AGENCY_TYPE.TRIBAL] +
                         all_stakeholders[AGENCY_TYPE.TERRITORIAL])
    active_SLTT_stakeholders = db.ReportDoc.find({
        "owner": {
            "$in": SLTT_stakeholders
        },
        "generated_time": {
            "$gte": start_date,
            "$lt": end_date
        },
    }).distinct("owner")
    # active_SLTT_stakeholders.sort()
    # active_SLTT_owners = list()
    # for org in active_SLTT_stakeholders:
    #     active_SLTT_owners.append(org)
    #     active_SLTT_owners += db.RequestDoc.get_all_descendants(org)

    active_private_stakeholders = db.ReportDoc.find({
        "owner": {
            "$in": all_stakeholders[AGENCY_TYPE.PRIVATE]
        },
        "generated_time": {
            "$gte": start_date,
            "$lt": end_date
        },
    }).distinct("owner")
    # active_private_stakeholders.sort()
    # active_private_owners = list()
    # for org in active_private_stakeholders:
    #     active_private_owners.append(org)
    #     active_private_owners += db.RequestDoc.get_all_descendants(org)

    print "Congressional Cyber Hygiene Metrics Report"
    mylist = {}
    print "Date Range: {} - {}".format(
        start_date.strftime("%Y-%m-%d %H:%M UTC"),
        end_date.strftime("%Y-%m-%d %H:%M UTC"),
    )

    print "\n--------------------------------------------------"
    print "Critical Vulnerability Remediation Performance:\n"
    # Use "_owners" lists here because we DO want to include descendent orgs in these metrics
    for (stakeholder_list, group_name) in (
        (active_fed_owners, "FEDERAL"),
        (active_fed_executive_owners, "FEDERAL EXECUTIVE"),
        (active_fed_cfo_owners, "FEDERAL EXECUTIVE - CFO ACT"),
        (active_fed_exec_non_cfo_owners, "FEDERAL EXECUTIVE - NON-CFO ACT"),
    ):
        print "{}:".format(group_name)
        (pipeline, collection) = closed_in_date_range_closed_ticket_age_pl(
            stakeholder_list, start_date, end_date)
        output = db[collection].aggregate(pipeline, cursor={})
        df = DataFrame(list(output))
        median_days_to_mitigate_criticals = round(
            df.loc[df["severity"] == 4]["duration_to_close"].median() /
            (24 * 60 * 60 * 1000.0))
        if isnull(median_days_to_mitigate_criticals):
            print "  Median time to mitigate Critical vulnerabilities: No Critical vulnerabilities mitigated"
            myarg = {
                group_name.replace(" ", "_").replace("-", "_") + "_median_time_to_mitigate":
                "No Critical Vulnerabilities Mitigated"
            }
        else:
            print "  Median time to mitigate Critical vulnerabilities: {0:,g} days".format(
                median_days_to_mitigate_criticals)
            myarg = {
                group_name.replace(" ", "_").replace("-", "_") + "_median_time_to_mitigate":
                "{0:,g}".format(median_days_to_mitigate_criticals)
            }
        mylist.update(myarg)

        (pipeline, collection) = opened_in_date_range_open_ticket_age_pl(
            stakeholder_list, start_date, end_date)
        output = db[collection].aggregate(pipeline, cursor={})
        df = DataFrame(list(output))
        median_days_active_criticals = round(
            df.loc[df["severity"] == 4]["open_ticket_duration"].median() /
            (24 * 60 * 60 * 1000.0))
        if isnull(median_days_active_criticals):
            print "  Median time Critical vulnerabilities currently active: No Critical vulnerabilities currently active"
            myarg = {
                group_name.replace(" ", "_").replace("-", "_") + "_currently_active":
                "No Critical Vulnerabilities Currently Active"
            }
        else:
            print "  Median time Critical vulnerabilities currently active: {0:,g} days".format(
                median_days_active_criticals)
            myarg = {
                group_name.replace(" ", "_").replace("-", "_") + "_currently_active":
                "{0:,g}".format(median_days_active_criticals)
            }
        mylist.update(myarg)
        print

    print "--------------------------------------------------"
    print "Number of Active CyHy Stakeholders by Segment:\n"
    # Use "_stakeholders" lists here because we DO NOT want to include descendent orgs in these metrics
    for (stakeholder_list, group_name) in (
        (active_fed_stakeholders, "FEDERAL"),
        (active_fed_executive_stakeholders, "FEDERAL EXECUTIVE"),
        (active_fed_cfo_stakeholders, "FEDERAL EXECUTIVE - CFO ACT"),
        (active_fed_exec_non_cfo_stakeholders,
         "FEDERAL EXECUTIVE - NON-CFO ACT"),
        (active_SLTT_stakeholders, "SLTT"),
        (active_private_stakeholders, "PRIVATE"),
    ):
        print "{}: {} stakeholders".format(group_name, len(stakeholder_list))
        myarg = {
            group_name.replace(" ", "_").replace("-", "_") + "_active_stakeholders":
            "{0:,g}".format(len(stakeholder_list))
        }
        mylist.update(myarg)

    print "\n--------------------------------------------------"
    print "Number of New Vulnerabilities Detected:\n"
    # Use "_owners" lists here because we DO want to include descendent orgs in these metrics
    for (stakeholder_list, group_name) in (
        (active_fed_owners, "FEDERAL"),
        (active_fed_executive_owners, "FEDERAL EXECUTIVE"),
        (active_fed_cfo_owners, "FEDERAL EXECUTIVE - CFO ACT"),
        (active_fed_exec_non_cfo_owners, "FEDERAL EXECUTIVE - NON-CFO ACT"),
    ):
        (pipeline,
         collection) = tickets_opened_count_pl(stakeholder_list, start_date,
                                               end_date)
        output = list(db[collection].aggregate(pipeline, cursor={}))
        print "{}:".format(group_name)
        for i in ("critical", "high", "medium", "low"):
            try:
                print "  {}: {:,}".format(i.title(), output[0][i])
                myarg = {
                    group_name.replace(" ", "_").replace("-", "_")
                    + "_{}_New_Vulns_Detected".format(i.title()):
                    "{:,}".format(output[0][i])
                }
            except IndexError:
                print "  {}: 0".format(i.title())
                myarg = {
                    group_name.replace(" ", "_").replace("-", "_")
                    + "_{}_New_Vulns_Detected".format(i.title()): "0"
                }
            mylist.update(myarg)
        print

    print "--------------------------------------------------"
    print "Number of Vulnerabilities Mitigated:\n"
    # Use "_owners" lists here because we DO want to include descendent orgs in these metrics
    for (stakeholder_list, group_name) in (
        (active_fed_owners, "FEDERAL"),
        (active_fed_executive_owners, "FEDERAL EXECUTIVE"),
        (active_fed_cfo_owners, "FEDERAL EXECUTIVE - CFO ACT"),
        (active_fed_exec_non_cfo_owners, "FEDERAL EXECUTIVE - NON-CFO ACT"),
    ):
        (pipeline,
         collection) = closed_ticket_count_pl(stakeholder_list, start_date,
                                              end_date)
        output = list(db[collection].aggregate(pipeline, cursor={}))
        print "{}:".format(group_name)
        for i in ("critical", "high", "medium", "low"):
            try:
                print "  {}: {:,}".format(i.title(), output[0][i])
                myarg = {
                    group_name.replace(" ", "_").replace("-", "_")
                    + "_{}_New_Vulns_Mitigated".format(i.title()):
                    "{:,}".format(output[0][i])
                }
            except IndexError:
                print "  {}: 0".format(i.title())
                myarg = {
                    group_name.replace(" ", "_").replace("-", "_")
                    + "_{}_New_Vulns_Mitigated".format(i.title()): "0"
                }
            mylist.update(myarg)
        print

    print "=================================================="
    print "Active CyHy Federal Stakeholders:\n"
    # Use "_stakeholders" lists here because we DO NOT want to include descendent orgs in these metrics
    for (stakeholder_list, group_name) in (
        (active_fed_stakeholders, "FEDERAL"),
        (active_fed_executive_stakeholders, "FEDERAL EXECUTIVE"),
        (active_fed_cfo_stakeholders, "FEDERAL EXECUTIVE - CFO ACT"),
        (active_fed_exec_non_cfo_stakeholders,
         "FEDERAL EXECUTIVE - NON-CFO ACT"),
    ):
        myarg = {
            group_name.replace(" ", "_").replace("-", "_") + "_Active_CyHy_Stakeholders":
            stakeholder_list
        }
        mylist.update(myarg)
        print "{} ({} agencies): ".format(group_name, len(stakeholder_list)),
        for org in stakeholder_list:
            if org != stakeholder_list[-1]:
                print "{},".format(org),
            else:
                print "{}\n".format(org)

    print mylist
    return mylist
Example #22
    def create_snapshot(
        self,
        owner,
        snapshot_oid,
        parent_oid=None,
        descendants_included=[],
        exclude_from_world_stats=False,
        progress_callback=None,
    ):
        """creates a new snapshot document with the oid returned from one of the tagging methods.
        Returns the snapshot if the snapshot is created successfully.
        Returns None if the snapshot is not unique.  In this case reports should be untagged."""
        snapshot_doc = self.__db.SnapshotDoc()
        snapshot_doc["_id"] = snapshot_oid
        snapshot_doc["latest"] = True
        snapshot_doc["owner"] = owner
        snapshot_doc["descendants_included"] = descendants_included
        if parent_oid:
            snapshot_doc["parents"] = [parent_oid]
        else:
            snapshot_doc["parents"] = [
                snapshot_oid
            ]  # If you don't have a parent snapshot, you are your own parent; this prevents deletion of
            # this snap if it ever becomes a child of another snapshot that later gets deleted
        snapshot_doc["networks"] = self.__db.RequestDoc.get_by_owner(
            owner).networks.iter_cidrs()
        for descendant in descendants_included:
            snapshot_doc["networks"] += self.__db.RequestDoc.get_by_owner(
                descendant).networks.iter_cidrs()

        if exclude_from_world_stats:
            snapshot_doc["exclude_from_world_stats"] = True

        current_time = util.utcnow()
        snapshot_doc["last_change"] = current_time

        start_time, end_time = self.__get_tag_timespan(
            snapshot_oid
        )  # Try to get start/end time from host_scan/port_scan/vuln_scan docs
        # If org has no latest=true host_scans, port_scans, or vuln_scans, start_time will be None
        if start_time is None:
            start_time, end_time = self.__get_host_timespan(
                [owner] + descendants_included
            )  # Try to get start/end time from host docs (not ideal, but better than nothing)
            # If org(s) have no host docs (or hosts that have not been netscanned yet), start_time will be None
            if start_time is None:
                # All else has failed; just set start/end time to the current time
                start_time = end_time = current_time

        snapshot_doc["start_time"] = start_time
        snapshot_doc["end_time"] = end_time

        if progress_callback:
            progress_callback()

        if snapshot_doc.will_conflict():
            # Avoid conflicts by setting end_time to the current time.  Not ideal,
            # but this will only happen in rare cases and should have minimal impact.
            snapshot_doc["end_time"] = current_time

        pipeline_collection = queries.addresses_scanned_pl(
            [owner] + descendants_included)
        results = database.run_pipeline_cursor(pipeline_collection, self.__db)
        if progress_callback:
            progress_callback()
        database.combine_results(snapshot_doc, results)

        pipeline_collection = queries.cvss_sum_pl(snapshot_oid)
        results = database.run_pipeline_cursor(pipeline_collection, self.__db)
        if progress_callback:
            progress_callback()
        if results:
            cvss_sum = float(results[0].get("cvss_sum", 0.0))
        else:
            cvss_sum = 0.0

        pipeline_collection = queries.host_count_pl([owner] +
                                                    descendants_included)
        results = database.run_pipeline_cursor(pipeline_collection, self.__db)
        if progress_callback:
            progress_callback()
        database.combine_results(snapshot_doc, results)

        pipeline_collection = queries.vulnerable_host_count_pl(snapshot_oid)
        results = database.run_pipeline_cursor(pipeline_collection, self.__db)
        if progress_callback:
            progress_callback()
        database.combine_results(snapshot_doc, results)

        snapshot_doc["cvss_average_all"] = util.safe_divide(
            cvss_sum, snapshot_doc["host_count"])
        snapshot_doc["cvss_average_vulnerable"] = util.safe_divide(
            cvss_sum, snapshot_doc["vulnerable_host_count"])

        pipeline_collection = queries.unique_operating_system_count_pl(
            snapshot_oid)
        results = database.run_pipeline_cursor(pipeline_collection, self.__db)
        if progress_callback:
            progress_callback()
        database.combine_results(snapshot_doc, results)

        pipeline_collection = queries.port_count_pl(snapshot_oid)
        results = database.run_pipeline_cursor(pipeline_collection, self.__db)
        if progress_callback:
            progress_callback()
        database.combine_results(snapshot_doc, results)

        pipeline_collection = queries.unique_port_count_pl(snapshot_oid)
        results = database.run_pipeline_cursor(pipeline_collection, self.__db)
        if progress_callback:
            progress_callback()
        database.combine_results(snapshot_doc, results)

        pipeline_collection = queries.silent_port_count_pl(
            [owner] + descendants_included)
        results = database.run_pipeline_cursor(pipeline_collection, self.__db)
        if progress_callback:
            progress_callback()
        database.combine_results(snapshot_doc, results)

        pipeline_collection = queries.severity_count_pl(snapshot_oid)
        results = database.run_pipeline_cursor(pipeline_collection, self.__db)
        if progress_callback:
            progress_callback()
        database.combine_results(snapshot_doc, results, "vulnerabilities")

        pipeline_collection = queries.unique_severity_count_pl(snapshot_oid)
        results = database.run_pipeline_cursor(pipeline_collection, self.__db)
        if progress_callback:
            progress_callback()
        database.combine_results(snapshot_doc, results,
                                 "unique_vulnerabilities")

        pipeline_collection = queries.false_positives_pl(snapshot_oid)
        results = database.run_pipeline_cursor(pipeline_collection, self.__db)
        if progress_callback:
            progress_callback()
        database.combine_results(snapshot_doc, results, "false_positives")

        pipeline_collection = queries.service_counts_simple_pl(snapshot_oid)
        results = database.run_pipeline_cursor(pipeline_collection, self.__db)
        if progress_callback:
            progress_callback()
        services = self.__process_services(results)
        snapshot_doc["services"] = services

        pipeline_collection = queries.open_ticket_age_in_snapshot_pl(
            current_time, snapshot_oid)
        results = database.run_pipeline_cursor(pipeline_collection, self.__db)
        if progress_callback:
            progress_callback()
        snapshot_doc["tix_msec_open"] = self.__process_open_ticket_age(
            results, current_time)

        tix_closed_since_date = current_time - datetime.timedelta(
            self.SNAPSHOT_CLOSED_TICKET_HISTORY_DAYS)
        pipeline_collection = queries.closed_ticket_age_for_orgs_pl(
            tix_closed_since_date, [owner] + descendants_included)
        results = database.run_pipeline_cursor(pipeline_collection, self.__db)
        if progress_callback:
            progress_callback()
        snapshot_doc["tix_msec_to_close"] = self.__process_closed_ticket_age(
            results, tix_closed_since_date)

        # reset previous latest flag
        self.__db.SnapshotDoc.reset_latest_flag_by_owner(owner)
        snapshot_doc.save()

        # now calculate the world statistics and update the snapshot
        # Since parent snapshots include data for their descendants, we don't want to count descendant snapshots when calculating world stats
        # NOTE: This won't work if a snapshot is created for a descendant org on its own after the parent org snapshot was created, but
        #       it's good enough for now.  The world statistics process should get updated as part of CYHY-145.
        snaps_to_exclude_from_world_stats = list()
        all_latest_snapshots = list(
            self.__db.SnapshotDoc.collection.find(
                {"latest": True},
                {
                    "_id": 1,
                    "parents": 1,
                    "exclude_from_world_stats": 1
                },
            ))
        for snap in all_latest_snapshots:
            if (snap["_id"] not in snap["parents"]
                ) or snap.get("exclude_from_world_stats"):
                # NOTE: A descendant snapshot has a different parent id than itself
                snaps_to_exclude_from_world_stats.append(snap["_id"])

        pipeline_collection = queries.world_pl(
            snaps_to_exclude_from_world_stats)
        results = database.run_pipeline_cursor(pipeline_collection, self.__db)
        if progress_callback:
            progress_callback()
        database.combine_results(snapshot_doc, results, "world")

        snapshot_doc.save()
        return snapshot_doc
Example #23
 def change_ownership(self, orig_owner, new_owner, networks, reason):
     # Change owner on all relevant documents for a given list of networks.
     # Special case for tickets collection; add a CHANGED event to the events list of each ticket
     change_event = {
         "time": util.utcnow(),
         "action": TICKET_EVENT.CHANGED,
         "reason": reason,
         "reference": None,
         "delta": [{
             "from": orig_owner,
             "to": new_owner,
             "key": "owner"
         }],
     }
     for net in networks.iter_cidrs():
         print "Changing owner of network %s to %s" % (net, new_owner)
         for collection, ip_key, update_cmd in (
             (self.__db.hosts, "_id", {
                 "$set": {
                     "owner": new_owner
                 }
             }),
             (self.__db.host_scans, "ip_int", {
                 "$set": {
                     "owner": new_owner
                 }
             }),
             (self.__db.port_scans, "ip_int", {
                 "$set": {
                     "owner": new_owner
                 }
             }),
             (self.__db.vuln_scans, "ip_int", {
                 "$set": {
                     "owner": new_owner
                 }
             }),
             (
                 self.__db.tickets,
                 "ip_int",
                 {
                     "$set": {
                         "owner": new_owner
                     },
                     "$push": {
                         "events": change_event
                     }
                 },
             ),
         ):
             result = collection.update(
                 {ip_key: {
                     "$gte": net.first,
                     "$lte": net.last
                 }},
                 update_cmd,
                 upsert=False,
                 multi=True,
                 safe=True,
             )
             result["collection"] = collection.name
             print "  {nModified} {collection} documents modified".format(
                 **result)
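
With modern PyMongo the same bulk update would presumably be written with update_many (collection.update(..., multi=True, safe=True) is the legacy form used above):

result = collection.update_many(
    {ip_key: {"$gte": net.first, "$lte": net.last}},
    update_cmd,
)
print("  %d %s documents modified" % (result.modified_count, collection.name))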
Example #24
PS_1 = {
    "ip": IPS[0],
    "ip_int": long(IPS[0]),
    "port": PORTSCAN_PORTS[1],
    "state": "open",
    "protocol": "tcp",
    "service": "telnet",
    "source": SOURCE_NMAP,
    "source_id": SOURCE_IDS[0],
    "owner": OWNER,
    "severity": 0,
    "cvss_base_score": None,
    "name": "Potentially Risky Service Detected: telnet",
    "_id": ObjectId(),
    "time": util.utcnow(),
    "latest": True,
}
PS_2 = {
    "ip": IPS[1],
    "ip_int": long(IPS[1]),
    "port": PORTSCAN_PORTS[2],
    "state": "open",
    "protocol": "tcp",
    "service": "ldap",
    "source": SOURCE_NMAP,
    "source_id": SOURCE_IDS[0],
    "owner": OWNER,
    "severity": 0,
    "cvss_base_score": None,
    "name": "Potentially Risky Service Detected: ldap",
Example #25
import logging
import os
import subprocess
import sys
import threading
import time

from bson import ObjectId
from collections import defaultdict
from docopt import docopt

from cyhy.core import SCAN_TYPE
from cyhy.core.common import REPORT_TYPE, REPORT_PERIOD
from cyhy.db import database, CHDatabase
from cyhy.util import util
from ncats_webd import cybex_queries

current_time = util.utcnow()

LOGGING_LEVEL = logging.INFO
LOG_FILE = "snapshots_reports_scorecard_automation.log"
REPORT_THREADS = 18
SNAPSHOT_THREADS = 32

NCATS_DHUB_URL = "dhub.ncats.cyber.dhs.gov:5001"
NCATS_WEB_URL = "web.data.ncats.cyber.dhs.gov"

WEEKLY_REPORT_BASE_DIR = "/var/cyhy/reports/output"
SCORECARD_OUTPUT_DIR = "scorecards"
SCORECARD_JSON_OUTPUT_DIR = "JSONfiles"
CYBEX_CSV_DIR = "cybex_csvs"
CYHY_REPORT_DIR = os.path.join(
    "report_archive", "reports{}".format(current_time.strftime("%Y%m%d")))
Example #26
def get_open_tickets_dataframe(db, ticket_severity):
    now = util.utcnow()
    first_report_time_cache = dict()  # Cache generated_time of first report for each ticket

    fed_executive_owners = db.RequestDoc.get_all_descendants("EXECUTIVE")
    tix = db.TicketDoc.find(
        {
            "source": "nessus",
            "details.severity": ticket_severity,
            "false_positive": False,
            "owner": {
                "$in": fed_executive_owners
            },
            "open": True,
        },
        {
            "_id": False,
            "owner": True,
            "time_opened": True,
            "ip": True,
            "port": True,
            "details.name": True,
            "details.cve": True,
            "snapshots": True,
        },
    )

    tix = list(tix)
    for x in tix:
        x.update(x["details"])
        del x["details"]
        x["days_since_first_detected"] = (
            now - x["time_opened"]).total_seconds() / (60 * 60 * 24)
        x["first_reported"] = None
        x["days_since_first_reported"] = None
        x["days_to_report"] = None

        for snap_id in x.get("snapshots", []):
            x["first_reported"] = first_report_time_cache.get(snap_id)
            if x["first_reported"]:
                # Found this snapshot's report time in the cache
                break
            else:
                # Not found in the cache, so make a database call
                first_report = db.reports.find_one({"snapshot_oid": snap_id})
                if first_report:
                    x["first_reported"] = first_report.get("generated_time")
                    first_report_time_cache[snap_id] = x["first_reported"]
                    break

        if x["first_reported"]:
            x["days_since_first_reported"] = (
                now - x["first_reported"]).total_seconds() / (60 * 60 * 24)
            x["days_to_report"] = (x["days_since_first_detected"] -
                                   x["days_since_first_reported"])

        if x.get("snapshots"):
            del x["snapshots"]

    df = DataFrame(tix)
    if not df.empty:
        df.sort_values(by="days_since_first_detected",
                       ascending=False,
                       inplace=True)
    return df