Example #1
    def __get_tag_timespan(self, oid):
        """determines the earliest, and latest times of documents"""
        pipeline = queries.time_span(oid)
        r1 = database.run_pipeline_cursor(
            (pipeline, database.HOST_SCAN_COLLECTION), self.__db)
        r2 = database.run_pipeline_cursor(
            (pipeline, database.PORT_SCAN_COLLECTION), self.__db)
        r3 = database.run_pipeline_cursor(
            (pipeline, database.VULN_SCAN_COLLECTION), self.__db)
        database.id_expand(r1)
        database.id_expand(r2)
        database.id_expand(r3)

        spans = []
        if r1:
            spans.append(r1[0])
        if r2:
            spans.append(r2[0])
        if r3:
            spans.append(r3[0])

        if len(spans) == 0:
            return None, None
        else:
            start_time = min([i["start_time"] for i in spans])
            end_time = max([i["end_time"] for i in spans])
            return start_time, end_time
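
The `queries.time_span` pipeline itself isn't shown above. A minimal sketch of what it could look like, assuming the scan documents carry a `snapshots` tag array and a `time` field (both assumptions, not confirmed by the source):

# Hypothetical sketch of queries.time_span; the real pipeline may differ.
def time_span(oid):
    """Collapse all docs tagged with this snapshot OID into one min/max span."""
    return [
        {"$match": {"snapshots": oid}},       # assumed tag field
        {"$group": {
            "_id": None,
            "start_time": {"$min": "$time"},  # assumed timestamp field
            "end_time": {"$max": "$time"},
        }},
    ]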
Example #2
    def __run_queries(self):
        fed_executive_orgs = self.__db.RequestDoc.find_one({'_id':'EXECUTIVE'})['children']
        # Get request docs for all FEDERAL EXECUTIVE orgs, except those in EXEMPT_ORGS list
        self.__requests = list(self.__db.RequestDoc.find({'_id':{'$in':fed_executive_orgs, '$nin':EXEMPT_ORGS}}))
        
        # Build up list of FED EXECUTIVE tallies updated within past CURRENTLY_SCANNED_DAYS days
        for tally in list(self.__db.TallyDoc.find({'_id':{'$in':fed_executive_orgs, '$nin':EXEMPT_ORGS}})):
            if tally['last_change'] >= self.__generated_time - timedelta(days=CURRENTLY_SCANNED_DAYS):
                self.__tallies.append(tally)                # Append the tally if it's been changed recently
            else:       # Check if this org has any descendants with tallies that have been changed recently
                tally_descendant_orgs = self.__db.RequestDoc.get_all_descendants(tally['_id'])
                if tally_descendant_orgs:
                    for tally_descendant in list(self.__db.TallyDoc.find({'_id':{'$in':tally_descendant_orgs}})):
                        if tally_descendant['last_change'] >= self.__generated_time - timedelta(days=CURRENTLY_SCANNED_DAYS):
                            self.__tallies.append(tally)    # Append the top-level org's tally if the descendant has been changed recently
                            break                           # No need to check any other descendants

        # If an org has descendants, we only want the top-level org to show up in the Scorecard
        # Make list of orgs that have children and their request docs so their child data can be accumulated later
        orgs_with_descendants = []
        requests_with_descendants = []
        for r in self.__requests:
            if r.get('children'):
                orgs_with_descendants.append(r['_id'])
                requests_with_descendants.append(r)
                        
        # Get relevant ticket age data
        pipeline_collection = queries.open_ticket_age_pl(self.__generated_time)
        self.__results['open_ticket_age'] = database.run_pipeline_cursor(pipeline_collection, self.__db)
        pipeline_collection = queries.closed_ticket_age_pl(self.__generated_time - timedelta(days=CLOSED_TICKETS_DAYS))
        self.__results['closed_ticket_age'] = database.run_pipeline_cursor(pipeline_collection, self.__db)
        
        # Throw out ticket data from orgs with descendants
        # list(<ticket_data>) iterates over a *copy* of the list so items can be properly removed from the original
        for r in list(self.__results['open_ticket_age']):
            if r['_id']['owner'] in orgs_with_descendants:
                self.__results['open_ticket_age'].remove(r)
                
        for r in list(self.__results['closed_ticket_age']):
            if r['_id']['owner'] in orgs_with_descendants:
                self.__results['closed_ticket_age'].remove(r)
                
        # Pull grouped ticket age data for orgs with descendants and add it to results
        for r in requests_with_descendants:
            descendants = self.__db.RequestDoc.get_all_descendants(r['_id'])
            pipeline_collection = queries.open_ticket_age_for_orgs_pl(self.__generated_time, r['_id'], descendants)
            self.__results['open_ticket_age'] += database.run_pipeline_cursor(pipeline_collection, self.__db)
            pipeline_collection = queries.closed_ticket_age_for_orgs_pl(self.__generated_time - timedelta(days=CLOSED_TICKETS_DAYS), r['_id'], descendants)
            self.__results['closed_ticket_age'] += database.run_pipeline_cursor(pipeline_collection, self.__db)
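
The `list(...)` copy idiom used when pruning results above is easy to get wrong: removing items from a list while iterating over that same list silently skips elements. A minimal standalone illustration:

data = [1, 2, 2, 3]
for item in list(data):    # iterate over a copy...
    if item == 2:
        data.remove(item)  # ...so removing from the original is safe
assert data == [1, 3]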
Example #3
 def __host_max_severity(self, host):
     ip_int = host["_id"]
     q = max_severity_for_host(ip_int)
     r = database.run_pipeline_cursor(q, self._db)
     database.id_expand(r)
     if len(r) > 0:
         # found tickets
         return r[0]["severity_max"]
     else:
         # no tickets
         return 0
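
`max_severity_for_host` isn't shown; a minimal sketch under assumed names (an `ip_int` ticket field, a `severity` field, and a "tickets" collection are all assumptions). Grouping on a subdocument `_id` gives `database.id_expand` something to flatten:

# Hypothetical sketch of max_severity_for_host; names are assumptions.
def max_severity_for_host(ip_int):
    """Max ticket severity for one host, paired with its collection."""
    pipeline = [
        {"$match": {"ip_int": ip_int}},
        {"$group": {"_id": {"ip_int": "$ip_int"},
                    "severity_max": {"$max": "$severity"}}},
    ]
    return pipeline, "tickets"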
Example #4
 def clear_vuln_latest_flags(self):
     """clear the latest flag for vuln_docs that match the ticket_manager scope"""
     ip_ints = [int(i) for i in self.__ips]
     pipeline = clear_latest_vulns_pl(
         ip_ints, list(self.__ports), list(self.__source_ids), self.__source
     )
     raw_vulns = database.run_pipeline_cursor(pipeline, self.__db)
     for raw_vuln in raw_vulns:
         vuln = self.__db.VulnScanDoc(raw_vuln)
         vuln["latest"] = False
         vuln.save()
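
`clear_latest_vulns_pl` isn't shown either; the parameters passed in suggest a single `$match` stage over the scope fields. A sketch, with the collection and exact field names assumed:

# Hypothetical sketch of clear_latest_vulns_pl; names are assumptions.
def clear_latest_vulns_pl(ip_ints, ports, source_ids, source):
    """Select the latest vuln_scan docs inside the ticket manager's scope."""
    pipeline = [
        {"$match": {
            "latest": True,
            "ip_int": {"$in": ip_ints},
            "port": {"$in": ports},
            "source_id": {"$in": source_ids},
            "source": source,
        }},
    ]
    return pipeline, "vuln_scans"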
Example #5
    def close_tickets(self):
        if self.__closing_time is None:
            # You don't have to go home but you can't stay here
            self.__closing_time = util.utcnow()
        ip_ints = [int(i) for i in self.__ips]

        # find tickets that are covered by this scan, but weren't just touched
        # TODO: this is the way I wanted to do it, but it blows up mongo
        # tickets = self.__db.TicketDoc.find({'ip_int':{'$in':ip_ints},
        #                                     'port':{'$in':self.__ports},
        #                                     'source_id':{'$in':self.__source_ids},
        #                                     '_id':{'$nin':list(self.__seen_ticket_ids)},
        #                                     'source':self.__source,
        #                                     'open':True})

        # work-around using a pipeline
        tickets = database.run_pipeline_cursor(
            close_tickets_pl(
                ip_ints,
                list(self.__ports),
                list(self.__source_ids),
                list(self.__seen_ticket_ids),
                self.__source,
            ),
            self.__db,
        )

        for raw_ticket in tickets:
            ticket = self.__db.TicketDoc(raw_ticket)  # make it managed
            # don't close tickets that are false_positives, just add event
            reason = "vulnerability not detected"
            self.__check_false_positive_expiration(
                ticket, self.__closing_time.replace(tzinfo=tz.tzutc())
            )  # explicitly set to UTC (see CYHY-286)
            if ticket["false_positive"] is True:
                event = {
                    "time": self.__closing_time,
                    "action": TICKET_EVENT.UNVERIFIED,
                    "reason": reason,
                    "reference": None,
                }
            else:
                ticket["open"] = False
                ticket["time_closed"] = self.__closing_time
                event = {
                    "time": self.__closing_time,
                    "action": TICKET_EVENT.CLOSED,
                    "reason": reason,
                    "reference": None,
                }
            if self.__manual_scan:
                event["manual"] = True
            ticket["events"].append(event)
            ticket.save()
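
The commented-out `find()` above pins down exactly what `close_tickets_pl` must select, so the pipeline work-around is likely little more than that query expressed as a `$match` stage. A sketch (the collection name is an assumption):

# Hypothetical sketch of close_tickets_pl, mirroring the commented-out
# find() above as a single $match stage.
def close_tickets_pl(ip_ints, ports, source_ids, seen_ticket_ids, source):
    """Open tickets in scope for this scan that were not just touched."""
    pipeline = [
        {"$match": {
            "ip_int": {"$in": ip_ints},
            "port": {"$in": ports},
            "source_id": {"$in": source_ids},
            "_id": {"$nin": seen_ticket_ids},
            "source": source,
            "open": True,
        }},
    ]
    return pipeline, "tickets"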
Example #6
    def __get_host_timespan(self, owners):
        """determines the earliest and latest last_changed times of hosts"""
        pipeline_collection = queries.host_time_span(owners)
        results = database.run_pipeline_cursor(pipeline_collection, self.__db)
        database.id_expand(results)

        if len(results) == 0:
            return None, None  # owner has no host docs
        else:
            start_time = results[0]["start_time"]
            end_time = results[0]["end_time"]
            return start_time, end_time
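
`queries.host_time_span` isn't shown; a minimal sketch, assuming host docs carry `owner` and `last_change` fields and live in a "hosts" collection (all assumptions):

# Hypothetical sketch of queries.host_time_span; names are assumptions.
def host_time_span(owners):
    """Earliest and latest last_change across the owners' host docs."""
    pipeline = [
        {"$match": {"owner": {"$in": owners}}},
        {"$group": {
            "_id": None,
            "start_time": {"$min": "$last_change"},
            "end_time": {"$max": "$last_change"},
        }},
    ]
    return pipeline, "hosts"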
Example #7
 def __run_queries(self):
     # Get request docs for all orgs that have BOD in their report_types
     self.__requests = list(self.__db.RequestDoc.find({'report_types':REPORT_TYPE.BOD}))
     bod_orgs = []
     for r in self.__requests:
         bod_orgs.append(r['_id'])
     
     # Build up list of BOD org tallies that were updated within past CURRENTLY_SCANNED_DAYS days
     for tally in list(self.__db.TallyDoc.find({'_id':{'$in':bod_orgs}})):
         if tally['last_change'] >= self.__generated_time - timedelta(days=CURRENTLY_SCANNED_DAYS):
             self.__tallies.append(tally)                # Append the tally if it's been changed recently
         else:       # Check if this org has any descendants with tallies that have been changed recently
             tally_descendant_orgs = self.__db.RequestDoc.get_all_descendants(tally['_id'])
             if tally_descendant_orgs:
                 for tally_descendant in list(self.__db.TallyDoc.find({'_id':{'$in':tally_descendant_orgs}})):
                     if tally_descendant['last_change'] >= self.__generated_time - timedelta(days=CURRENTLY_SCANNED_DAYS):
                         self.__tallies.append(tally)    # Append the top-level org's tally if the descendant has been changed recently
                         break                           # No need to check any other descendants
                                 
     # Get list of 'CFO Act' orgs
     self.__cfo_act_orgs = self.__db.RequestDoc.find_one({'_id':'FED_CFO_ACT'})['children']
     
     # If an org has descendants, we only want the top-level org to show up in the Scorecard
     # Make list of orgs that have children and their request docs so their child data can be accumulated later
     orgs_with_descendants = []
     requests_with_descendants = []
     for r in self.__requests:
         if r.get('children'):
             orgs_with_descendants.append(r['_id'])
             requests_with_descendants.append(r)
     
     # Get relevant critical ticket data
     pipeline_collection = self.__open_critical_tix_opened_in_date_range_pl(BEFORE_THE_DAWN_OF_CYHY, self.__generated_time)
     self.__results['open_critical_tix'] = database.run_pipeline_cursor(pipeline_collection, self.__db)
     prev_scorecard_generated_time = parser.parse(self.__previous_scorecard_data['generated_time'])
     pipeline_collection = self.__open_critical_tix_opened_in_date_range_pl(prev_scorecard_generated_time, self.__generated_time)
     self.__results['open_critical_tix_opened_since_previous_scorecard'] = database.run_pipeline_cursor(pipeline_collection, self.__db)
     pipeline_collection = self.__open_critical_tix_opened_in_date_range_pl(self.__generated_time - timedelta(days=30), self.__generated_time)
     self.__results['open_critical_tix_opened_less_than_30_days_ago'] = database.run_pipeline_cursor(pipeline_collection, self.__db)
     pipeline_collection = self.__open_critical_tix_opened_in_date_range_pl(BEFORE_THE_DAWN_OF_CYHY, self.__generated_time - timedelta(days=90))
     self.__results['open_critical_tix_opened_more_than_90_days_ago'] = database.run_pipeline_cursor(pipeline_collection, self.__db)
     pipeline_collection = self.__critical_tix_open_on_date_open_since_date_pl(self.__bod_effective_date, self.__bod_effective_date)
     self.__results['critical_tix_open_at_bod_start'] = database.run_pipeline_cursor(pipeline_collection, self.__db)
     pipeline_collection = self.__closed_critical_tix_open_on_date_pl(self.__bod_effective_date, self.__generated_time)
     self.__results['critical_tix_open_at_bod_start_now_closed'] = database.run_pipeline_cursor(pipeline_collection, self.__db)
     pipeline_collection = self.__critical_tix_open_on_date_open_since_date_pl(self.__bod_effective_date, 
                                                                               self.__bod_effective_date - timedelta(days=30))
     self.__results['critical_tix_open_more_than_30_days_at_bod_start'] = database.run_pipeline_cursor(pipeline_collection, self.__db)
     pipeline_collection = self.__active_hosts_pl()
     self.__results['active_hosts'] = database.run_pipeline_cursor(pipeline_collection, self.__db)
     
     # Throw out data from orgs with descendants
     # list(self.__results[results_field]) iterates over a *copy* of the list so items can be properly removed from the original
     for results_field in ['open_critical_tix', 'open_critical_tix_opened_less_than_30_days_ago',
                           'open_critical_tix_opened_since_previous_scorecard',
                           'open_critical_tix_opened_more_than_90_days_ago',
                           'critical_tix_open_at_bod_start', 'critical_tix_open_at_bod_start_now_closed',
                           'critical_tix_open_more_than_30_days_at_bod_start', 'active_hosts']:
         for r in list(self.__results[results_field]):
             if r['_id']['owner'] in orgs_with_descendants:
                 self.__results[results_field].remove(r)
                                   
     # Pull grouped data for orgs with descendants and add it to results
     for r in requests_with_descendants:
         descendants = self.__db.RequestDoc.get_all_descendants(r['_id'])
         pipeline_collection = self.__open_critical_tix_opened_in_date_range_for_orgs_pl(BEFORE_THE_DAWN_OF_CYHY, self.__generated_time, r['_id'], descendants)
         self.__results['open_critical_tix'] += database.run_pipeline_cursor(pipeline_collection, self.__db)
         pipeline_collection = self.__open_critical_tix_opened_in_date_range_for_orgs_pl(prev_scorecard_generated_time, self.__generated_time, r['_id'], descendants)
         self.__results['open_critical_tix_opened_since_previous_scorecard'] += database.run_pipeline_cursor(pipeline_collection, self.__db)
         pipeline_collection = self.__open_critical_tix_opened_in_date_range_for_orgs_pl(self.__generated_time - timedelta(days=30), self.__generated_time, r['_id'], descendants)
         self.__results['open_critical_tix_opened_less_than_30_days_ago'] += database.run_pipeline_cursor(pipeline_collection, self.__db)
         pipeline_collection = self.__open_critical_tix_opened_in_date_range_for_orgs_pl(BEFORE_THE_DAWN_OF_CYHY, self.__generated_time - timedelta(days=90), r['_id'], descendants)
         self.__results['open_critical_tix_opened_more_than_90_days_ago'] += database.run_pipeline_cursor(pipeline_collection, self.__db)
         pipeline_collection = self.__critical_tix_open_on_date_open_since_date_for_orgs_pl(self.__bod_effective_date, self.__bod_effective_date, r['_id'], descendants)
         self.__results['critical_tix_open_at_bod_start'] += database.run_pipeline_cursor(pipeline_collection, self.__db)
         pipeline_collection = self.__closed_critical_tix_open_on_date_for_orgs_pl(self.__bod_effective_date, self.__generated_time, r['_id'], descendants)
         self.__results['critical_tix_open_at_bod_start_now_closed'] += database.run_pipeline_cursor(pipeline_collection, self.__db)
         pipeline_collection = self.__critical_tix_open_on_date_open_since_date_for_orgs_pl(self.__bod_effective_date, self.__bod_effective_date - timedelta(days=30), r['_id'], descendants)
         self.__results['critical_tix_open_more_than_30_days_at_bod_start'] += database.run_pipeline_cursor(pipeline_collection, self.__db)
         
         pipeline_collection = self.__active_hosts_for_orgs_pl(r['_id'], descendants)
         self.__results['active_hosts'] += database.run_pipeline_cursor(pipeline_collection, self.__db)
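
All of the `*_pl` builders above follow the same shape: a date-range `$match` plus a per-owner `$group` (the `r['_id']['owner']` lookups imply the grouping key). A sketch of one of them, with field names and the severity encoding assumed:

# Hypothetical sketch of __open_critical_tix_opened_in_date_range_pl;
# field names and the "critical" severity value are assumptions.
def open_critical_tix_opened_in_date_range_pl(start_date, end_date):
    """Count open critical tickets opened in the date range, per owner."""
    pipeline = [
        {"$match": {
            "open": True,
            "severity": 4,  # assumed encoding for "critical"
            "time_opened": {"$gte": start_date, "$lt": end_date},
        }},
        {"$group": {"_id": {"owner": "$owner"}, "count": {"$sum": 1}}},
    ]
    return pipeline, "tickets"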
Example #8
def json_get_first_seen_ticket_counts():
    cursor = database.run_pipeline_cursor(first_seen_ticket_counts(),
                                          current_app.db)
    counts = list(cursor)
    database.id_expand(counts)
    return counts
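
`first_seen_ticket_counts` isn't shown; given that `id_expand` is called on the results, its `$group` key is presumably a subdocument. A sketch, assuming tickets carry a `time_opened` field:

# Hypothetical sketch of first_seen_ticket_counts; names are assumptions.
def first_seen_ticket_counts():
    """Ticket counts bucketed by the calendar day they were first seen."""
    pipeline = [
        {"$group": {
            "_id": {"first_seen": {"$dateToString": {
                "format": "%Y-%m-%d", "date": "$time_opened"}}},
            "count": {"$sum": 1},
        }},
    ]
    return pipeline, "tickets"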
Example #9
    def create_snapshot(
        self,
        owner,
        snapshot_oid,
        parent_oid=None,
        descendants_included=None,
        exclude_from_world_stats=False,
        progress_callback=None,
    ):
        """Create a new snapshot document with the OID returned from one of the tagging methods.

        Return the snapshot if it is created successfully.
        Return None if the snapshot is not unique; in that case, reports should be untagged.
        """
        if descendants_included is None:
            # Guard against the shared mutable default argument pitfall
            descendants_included = []
        snapshot_doc = self.__db.SnapshotDoc()
        snapshot_doc["_id"] = snapshot_oid
        snapshot_doc["latest"] = True
        snapshot_doc["owner"] = owner
        snapshot_doc["descendants_included"] = descendants_included
        if parent_oid:
            snapshot_doc["parents"] = [parent_oid]
        else:
            # If you don't have a parent snapshot, you are your own parent;
            # this prevents deletion of this snap if it ever becomes a child
            # of another snapshot that later gets deleted
            snapshot_doc["parents"] = [snapshot_oid]
        snapshot_doc["networks"] = self.__db.RequestDoc.get_by_owner(
            owner).networks.iter_cidrs()
        for descendant in descendants_included:
            snapshot_doc["networks"] += self.__db.RequestDoc.get_by_owner(
                descendant).networks.iter_cidrs()

        if exclude_from_world_stats:
            snapshot_doc["exclude_from_world_stats"] = True

        current_time = util.utcnow()
        snapshot_doc["last_change"] = current_time

        # Try to get start/end time from host_scan/port_scan/vuln_scan docs
        start_time, end_time = self.__get_tag_timespan(snapshot_oid)
        if start_time is None:
            # Org has no latest=true host_scans, port_scans, or vuln_scans;
            # try to get start/end time from host docs instead (not ideal,
            # but better than nothing)
            start_time, end_time = self.__get_host_timespan(
                [owner] + descendants_included
            )
            if start_time is None:
                # Org(s) have no host docs (or no hosts that have been
                # netscanned yet); all else has failed, so just set the
                # start/end times to the current time
                start_time = end_time = current_time

        snapshot_doc["start_time"] = start_time
        snapshot_doc["end_time"] = end_time

        if progress_callback:
            progress_callback()

        if snapshot_doc.will_conflict():
            # Avoid conflicts by setting end_time to the current time.
            # Not ideal, but this will only happen in rare cases and should
            # have minimal impact.
            snapshot_doc["end_time"] = current_time

        pipeline_collection = queries.addresses_scanned_pl(
            [owner] + descendants_included)
        results = database.run_pipeline_cursor(pipeline_collection, self.__db)
        if progress_callback:
            progress_callback()
        database.combine_results(snapshot_doc, results)

        pipeline_collection = queries.cvss_sum_pl(snapshot_oid)
        results = database.run_pipeline_cursor(pipeline_collection, self.__db)
        if progress_callback:
            progress_callback()
        if results:
            cvss_sum = float(results[0].get("cvss_sum", 0.0))
        else:
            cvss_sum = 0.0

        pipeline_collection = queries.host_count_pl([owner] +
                                                    descendants_included)
        results = database.run_pipeline_cursor(pipeline_collection, self.__db)
        if progress_callback:
            progress_callback()
        database.combine_results(snapshot_doc, results)

        pipeline_collection = queries.vulnerable_host_count_pl(snapshot_oid)
        results = database.run_pipeline_cursor(pipeline_collection, self.__db)
        if progress_callback:
            progress_callback()
        database.combine_results(snapshot_doc, results)

        snapshot_doc["cvss_average_all"] = util.safe_divide(
            cvss_sum, snapshot_doc["host_count"])
        snapshot_doc["cvss_average_vulnerable"] = util.safe_divide(
            cvss_sum, snapshot_doc["vulnerable_host_count"])

        pipeline_collection = queries.unique_operating_system_count_pl(
            snapshot_oid)
        results = database.run_pipeline_cursor(pipeline_collection, self.__db)
        if progress_callback:
            progress_callback()
        database.combine_results(snapshot_doc, results)

        pipeline_collection = queries.port_count_pl(snapshot_oid)
        results = database.run_pipeline_cursor(pipeline_collection, self.__db)
        if progress_callback:
            progress_callback()
        database.combine_results(snapshot_doc, results)

        pipeline_collection = queries.unique_port_count_pl(snapshot_oid)
        results = database.run_pipeline_cursor(pipeline_collection, self.__db)
        if progress_callback:
            progress_callback()
        database.combine_results(snapshot_doc, results)

        pipeline_collection = queries.silent_port_count_pl(
            [owner] + descendants_included)
        results = database.run_pipeline_cursor(pipeline_collection, self.__db)
        if progress_callback:
            progress_callback()
        database.combine_results(snapshot_doc, results)

        pipeline_collection = queries.severity_count_pl(snapshot_oid)
        results = database.run_pipeline_cursor(pipeline_collection, self.__db)
        if progress_callback:
            progress_callback()
        database.combine_results(snapshot_doc, results, "vulnerabilities")

        pipeline_collection = queries.unique_severity_count_pl(snapshot_oid)
        results = database.run_pipeline_cursor(pipeline_collection, self.__db)
        if progress_callback:
            progress_callback()
        database.combine_results(snapshot_doc, results,
                                 "unique_vulnerabilities")

        pipeline_collection = queries.false_positives_pl(snapshot_oid)
        results = database.run_pipeline_cursor(pipeline_collection, self.__db)
        if progress_callback:
            progress_callback()
        database.combine_results(snapshot_doc, results, "false_positives")

        pipeline_collection = queries.service_counts_simple_pl(snapshot_oid)
        results = database.run_pipeline_cursor(pipeline_collection, self.__db)
        if progress_callback:
            progress_callback()
        services = self.__process_services(results)
        snapshot_doc["services"] = services

        pipeline_collection = queries.open_ticket_age_in_snapshot_pl(
            current_time, snapshot_oid)
        results = database.run_pipeline_cursor(pipeline_collection, self.__db)
        if progress_callback:
            progress_callback()
        snapshot_doc["tix_msec_open"] = self.__process_open_ticket_age(
            results, current_time)

        tix_closed_since_date = current_time - datetime.timedelta(
            self.SNAPSHOT_CLOSED_TICKET_HISTORY_DAYS)
        pipeline_collection = queries.closed_ticket_age_for_orgs_pl(
            tix_closed_since_date, [owner] + descendants_included)
        results = database.run_pipeline_cursor(pipeline_collection, self.__db)
        if progress_callback:
            progress_callback()
        snapshot_doc["tix_msec_to_close"] = self.__process_closed_ticket_age(
            results, tix_closed_since_date)

        # reset previous latest flag
        self.__db.SnapshotDoc.reset_latest_flag_by_owner(owner)
        snapshot_doc.save()

        # Now calculate the world statistics and update the snapshot.
        # Since parent snapshots include data for their descendants, we don't
        # want to count descendant snapshots when calculating world stats.
        # NOTE: This won't work if a snapshot is created for a descendant org
        # on its own after the parent org snapshot was created, but it's good
        # enough for now.  The world statistics process should get updated as
        # part of CYHY-145.
        snaps_to_exclude_from_world_stats = list()
        all_latest_snapshots = list(
            self.__db.SnapshotDoc.collection.find(
                {"latest": True},
                {
                    "_id": 1,
                    "parents": 1,
                    "exclude_from_world_stats": 1
                },
            ))
        for snap in all_latest_snapshots:
            # NOTE: A descendant snapshot has a different parent id than itself
            if snap["_id"] not in snap["parents"] or snap.get(
                "exclude_from_world_stats"
            ):
                snaps_to_exclude_from_world_stats.append(snap["_id"])

        pipeline_collection = queries.world_pl(
            snaps_to_exclude_from_world_stats)
        results = database.run_pipeline_cursor(pipeline_collection, self.__db)
        if progress_callback:
            progress_callback()
        database.combine_results(snapshot_doc, results, "world")

        snapshot_doc.save()
        return snapshot_doc
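
`database.combine_results` is called throughout with an optional key argument; the call sites (top-level fields like `host_count` are read back later, while `severity_count_pl` results land under `vulnerabilities`) suggest merge semantics along these lines, though the real helper may differ:

# Hypothetical sketch of database.combine_results, inferred from the call
# sites above; the real helper may differ.
def combine_results(doc, results, key=None):
    """Merge the first result row into doc, optionally under a sub-key."""
    if not results:
        return
    row = dict(results[0])
    row.pop("_id", None)  # drop the pipeline's group key
    if key:
        doc[key] = row
    else:
        doc.update(row)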