Example #1
def monitor_nmap_scan(sensor_id, task_id):
    """Monitors an NMAP scan
    Args:
        sensor_id: The sensor id where the NMAP is working.
        task_id: The celery task id that is launching the NMAP
    """
    task = is_task_in_celery(task_id)
    while task is not None:
        try:
            try:
                job = apimethod_nmapdb_get_task(task_id)
            except APINMAPScanException:
                job = None

            if job is not None and job["status"] == "In Progress":
                # check status
                try:
                    data = apimethod_monitor_nmap_scan(sensor_id, task_id)
                except Exception as error:
                    logger.error(
                        "[monitor_nmap_scan:%s] Cannot retrieve scan data...%s"
                        % (task_id, str(error)))
                else:
                    # Scan status is managed by the main scan task. This task should only monitor and retrieve
                    # the scan results...
                    job["scanned_hosts"] = data['scanned_hosts']
                    if data['target_number'] > 0:
                        job["target_number"] = data['target_number']
                    # Estimate the remaining time from the average time per scanned host
                    if data['scanned_hosts'] > 0:
                        average_sec = int((time.time() - job["start_time"]) /
                                          data['scanned_hosts'])
                        job["remaining_time"] = (
                            data['target_number'] -
                            data['scanned_hosts']) * average_sec
                    try:
                        apimethod_nmapdb_update_task(task_id, job)
                    except Exception as error:
                        logger.error(
                            "[monitor_nmap_scan:%s] Cannot update nmap scan status...%s"
                            % (task_id, str(error)))
        except Exception as error:
            logger.error(
                "[monitor_nmap_scan:%s] Unexpected exception while monitoring the NMAP scan status...%s:%s"
                % (task_id, type(error), str(error)))
        time.sleep(10)
        task = is_task_in_celery(task_id)
    logger.warning(
        "[monitor_nmap_scan:%s] It seems that the scan has finished..." %
        str(task_id))
    return True
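
The remaining-time estimate in the loop above is simple linear extrapolation: the average seconds per scanned host so far, multiplied by the hosts still pending. A minimal, self-contained sketch of that arithmetic (the field names mirror the job dict above; the numbers are made up):

import time

# Hypothetical snapshot of a scan, mirroring the job fields used above
job = {"start_time": int(time.time()) - 120}    # started two minutes ago
data = {"target_number": 10, "scanned_hosts": 4}

# 120 s elapsed / 4 hosts scanned ~= 30 s per host on average
average_sec = int((time.time() - job["start_time"]) / data["scanned_hosts"])
# 6 hosts left * 30 s/host ~= 180 s remaining
remaining_time = (data["target_number"] - data["scanned_hosts"]) * average_sec
print(remaining_time)  # roughly 180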
Example #2
def apimethod_get_nmap_scan_status(task_id):
    """Returns the nmap status for the given task
    Args:
        task_id: The task id which status you want to know
    Returns:
        job(str): A python dic with the job information.
    Raises:
        APINMAPScanKeyNotFound: When the given id doesn't exist
        APINMAPScanException: When something wrong happen
    """

    db = None
    try:
        # The nmap scan could be scheduled in Celery but not launched yet;
        # in that case there is no nmap status in the database.
        job = None
        db = NMAPScansDB()
        tries = 3
        while tries > 0:
            try:
                raw_data = db.get(task_id)
                job = ast.literal_eval(raw_data)
                break
            except RedisDBKeyNotFound:
                # Maybe the job is not in the database yet;
                # check whether it is scheduled in Celery.
                task = is_task_in_celery(task_id)
                if task is not None and task_id == task['id']:
                    task_kwargs = task['kwargs']
                    # The job info comes from the task kwargs
                    job = {"job_id": task['id'],
                           "sensor_id": task_kwargs['sensor_id'],
                           "idm": task_kwargs['idm'],
                           "target_number": task_kwargs['targets_number'],
                           "scan_params": {"target": task_kwargs['target'],
                                           "scan_type": task_kwargs['scan_type'],
                                           "rdns": task_kwargs['rdns'],
                                           "autodetect": task_kwargs['autodetect'],
                                           "scan_timing": task_kwargs['scan_timing'],
                                           "scan_ports": task_kwargs['scan_ports']},
                           "status": "In Progress",
                           "scanned_hosts": 0,
                           "scan_user": task_kwargs['scan_user'],
                           "start_time": int(time.time()),
                           "end_time": -1,
                           "remaining_time": -1}
                    break
                time.sleep(1)
            tries -= 1
    except Exception as e:
        raise APINMAPScanException(str(e))
    finally:
        if db is not None:
            del db
    if job is None:
        raise APINMAPScanKeyNotFound()
    return job
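
The db.get()/ast.literal_eval() pair above implies the job dict is persisted as a Python-literal string (e.g. via repr()); the RedisDBKeyNotFound exception suggests NMAPScansDB is a Redis-backed wrapper, though its code is not shown here. A minimal sketch of that serialization round-trip, independent of any database:

import ast

# Hypothetical job dict in the same shape the example builds
job = {"job_id": "e8e2a4ce-bc09-47f8-9ce8-6d3798acc2eb",
       "status": "In Progress",
       "scanned_hosts": 0}

# Store: repr() renders the dict as a Python literal, the form db.get() returns
raw_data = repr(job)

# Load: ast.literal_eval() safely parses Python literals (unlike eval())
restored = ast.literal_eval(raw_data)
assert restored == job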
Example #3
    def get_job_status(job_id):
        """
        Returns the job status or None if the job doesn't exist
        @param job_id: job id string to check in canonical uuid format
        @return: a celery.states.state object or None
        """
        job_id_bytes = apimethods.utils.get_bytes_from_uuid(job_id)
        job_status = db_get_job_status(job_id_bytes)
        if job_status:
            return pickle.loads(job_status.info)
        else:
            # Maybe it's not in the database yet. Let's inspect celery queues.
            job_status = is_task_in_celery(job_id)
            # When the task is stored in the database, it has the following format:
            #              {u'retries': 0,
            #              u'expires': None,
            #              u'uuid': u'e8e2a4ce-bc09-47f8-9ce8-6d3798acc2eb',
            #              u'clock': 49,
            #              u'timestamp': 1441353908.687852,
            #              u'args': u"[u'DF08B5C7521111E5A4AE000C295288BF', u'5DADB3A8662F91844C62172498696BB1', u'192.168.2.213', u'crosa', u'alien4ever!', u'bosquewin2008.alienvault.com', u'001']",
            #              u'eta': None,
            #              u'kwargs': u'{}',
            #              'type': u'task-received',
            #              u'hostname': u'w1.VirtualUSMAllInOneLite',
            #              u'name': u'celerymethods.jobs.ossec_win_deploy.ossec_win_deploy'}
            # When the task is obtained from Celery's inspect API instead, it has the
            # following format. We set the 'type' field manually to match the database format.
            #             {u'hostname': u'w1.VirtualUSMAllInOneLite',
            #              u'time_start': 1441354117.651205,
            #              u'name': u'celerymethods.jobs.ossec_win_deploy.ossec_win_deploy',
            #              u'delivery_info': {u'routing_key': u'celery', u'exchange': u'celery'},
            #              u'args': u"[u'DF08B5C7521111E5A4AE000C295288BF', u'5DADB3A8662F91844C62172498696BB1', u'192.168.2.213', u'crosa', u'alien4ever!', u'bosquewin2008.alienvault.com', u'001']",
            #              u'acknowledged': True,
            #              u'kwargs': u'{}', u'id': u'720d8fdb-9c2a-4be7-8889-5244d8c980df',
            #              u'worker_pid': 14330}
            if job_status is not None:
                job_status['type'] = "task-received"
            return job_status
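
All of the examples lean on an is_task_in_celery() helper whose code is not shown. A sketch of what such a helper could look like, using Celery's standard inspect API (active() and reserved() are real Celery calls; the function body itself is an assumption, not the project's actual implementation):

from celery import current_app

def is_task_in_celery(task_id):
    # Hypothetical reconstruction; the real helper is not shown in these examples.
    inspector = current_app.control.inspect()
    # active() lists running tasks, reserved() lists tasks prefetched by workers;
    # both return a dict mapping worker hostname -> list of task dicts with an 'id' key.
    for query in (inspector.active, inspector.reserved):
        workers = query() or {}
        for tasks in workers.values():
            for task in tasks:
                if task.get('id') == task_id:
                    return task
    return None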