Example #1
def __init__(self):
    super(ScanOnConfig, self).__init__()
    self._config = Configuration()
    self._sql = SQLAlchemyConnection(self._config)
    self._scanner_config = DetectorConfiguration.ConfigurationLoader()
    self._context = '{}'
    self._source_data = {}
    self._importer = Importer(self._ioc.message())
    self._duplification_filter = SourceDeDuplify(self._ioc.message())
Example #2
def __init__(self):
    super(Detector, self).__init__()
    self._config = Configuration()
    self._sql = SQLAlchemyConnection(self._config)
    self._excelsior_config = DetectorConfiguration.ConfigurationLoader()
    self._importer = Importer(self._ioc.message())
Example #3
class Detector(GreaseDaemonCommand):
    def __init__(self):
        super(Detector, self).__init__()
        self._config = Configuration()
        self._sql = SQLAlchemyConnection(self._config)
        self._excelsior_config = DetectorConfiguration.ConfigurationLoader()
        self._importer = Importer(self._ioc.message())

    def __del__(self):
        super(Detector, self).__del__()
        self._sql.get_session().close()

    def execute(self, context='{}'):
        # first lets see if we have some stuff to parse
        result = self._sql.get_session().query(SourceData, JobServers)\
            .filter(JobServers.id == SourceData.detection_server)\
            .filter(JobServers.id == self._config.node_db_id())\
            .filter(SourceData.detection_start_time == None)\
            .filter(SourceData.detection_end_time == None)\
            .filter(SourceData.detection_complete == False)\
            .limit(15)\
            .all()
        if not result:
            self._ioc.message().debug(
                "No sources scheduled for detection on this node", True)
            return True
        else:
            # Now lets loop through
            self._ioc.message().debug(
                "TOTAL SOURCE DOCUMENTS RETURNED: [{0}]".format(len(result)),
                True)
            for source in result:
                # first claiming them as ours then parsing them
                self._take_ownership(source.SourceData.id)
                # now lets parse the sources
                self._ioc.message().debug(
                    "PROCESSING SOURCE ID: [{0}]".format(source.SourceData.id),
                    True)
                parsed_source = self._parse_source(
                    source.SourceData.source_data,
                    self._excelsior_config.get_scanner_config(
                        source.SourceData.scanner))
                self._complete(source.SourceData.id)
                # now lets assign this parsed source out
                if self._schedule_scheduling(source.SourceData.id,
                                             parsed_source):
                    self._ioc.message().info(
                        "Successfully Scheduled Parsed Source ID: [" +
                        str(source.SourceData.id) + "]")
                    continue
                else:
                    self._reverse(source.SourceData.id)
                    self._ioc.message().error(
                        "Failed To Schedule Parsed Source ID: [" +
                        str(source.SourceData.id) + "]")
                    continue
        return True

    def _take_ownership(self, source_file_id):
        # type: (int) -> None
        stmt = update(SourceData)\
            .where(SourceData.id == source_file_id)\
            .values(detection_start_time=datetime.utcnow())
        self._sql.get_session().execute(stmt)
        self._sql.get_session().commit()
        self._ioc.message().debug(
            "TAKING OWNERSHIP OF SOURCE ID: [{0}]".format(source_file_id),
            True)

    def _complete(self, source_file_id):
        # type: (int) -> None
        stmt = update(SourceData)\
            .where(SourceData.id == source_file_id)\
            .values(detection_complete=True, detection_end_time=datetime.utcnow())
        self._sql.get_session().execute(stmt)
        self._sql.get_session().commit()
        self._ioc.message().debug(
            "COMPLETING SOURCE ID: [{0}]".format(source_file_id), True)

    def _reverse(self, source_file_id):
        stmt = update(SourceData)\
            .where(SourceData.id == source_file_id)\
            .values(detection_start_time=None, detection_end_time=None, detection_complete=None)
        self._sql.get_session().execute(stmt)
        self._sql.get_session().commit()
        self._ioc.message().debug(
            "SOURCE FILE FAILED SCHEDULING REVERSING FILE: [{0}]".format(
                source_file_id), True)

    def _parse_source(self, sources, rule_set):
        # type: (dict, list) -> dict
        # walk each source record, apply every rule in the rule set,
        # and track the per-record results as we go
        final_source_data = defaultdict()
        index = 0
        total_records = len(sources)
        for source in sources:
            self._ioc.message().debug(
                "PROCESSING OBJECT [{0}] OF [{1}] INTERNAL STRUCTURE: [{2}]".
                format(str(index + 1), str(total_records), str(source)), True)
            # first lets initialize all the source data into the object
            final_source_data[index] = source
            # now lets create our rule_processing key
            final_source_data[index]['rule_processing'] = defaultdict()
            # now lets loop through the rules we want to parse this source with
            for rule in rule_set:
                self._ioc.message().debug(
                    "PROCESSING RULE [{0}]".format(str(rule['name'])), True)
                # first lets make the rule processing result key
                final_source_data[index]['rule_processing'][
                    rule['name']] = defaultdict()
                # next lets compute each rule in the rule processor
                # another loop, but iterating the rule logic is unavoidable here
                for detector, detector_config in rule['logic'].iteritems():
                    # lets try to new up this detector
                    detector_instance = self._importer.load(
                        'tgt_grease_enterprise.Detectors', detector, True)
                    if isinstance(detector_instance, Detectors.BaseDetector):
                        self._ioc.message().debug(
                            "PROCESSING RULE [{0}] LOGICAL BLOCK [{1}]".format(
                                str(rule['name']), str(detector)), True)
                        # we have a valid detector to parse with
                        # lets compute the source with the detector config
                        detector_instance.param_compute(
                            source, detector_config)
                        # lets observe the result
                        result = detector_instance.get_result()
                        if result['result']:
                            # we passed the rule lets set that in the final source data
                            self._ioc.message().debug(
                                "PROCESSING RULE [{0}] LOGICAL BLOCK [{1}] PASSED"
                                .format(str(rule['name']),
                                        str(detector)), True)
                            final_source_data[index]['rule_processing'][
                                rule['name']]['status'] = True
                            self._ioc.message().debug(
                                "RULE [{0}] PASSED THUS FAR".format(
                                    str(rule['name'])), True)
                            result.pop('result')
                            if 'parameters' not in final_source_data[index][
                                    'rule_processing'][rule['name']]:
                                final_source_data[index]['rule_processing'][
                                    rule['name']]['parameters'] = result
                            else:
                                final_source_data[index]['rule_processing'][
                                    rule['name']]['parameters'].update(result)
                            if 'constants' in rule:
                                final_source_data[index]['rule_processing'][
                                    rule['name']]['parameters'][
                                        'constants'] = rule['constants']
                            # del out the instance
                            del detector_instance
                            # route on
                            continue
                        else:
                            # rule failed
                            final_source_data[index]['rule_processing'][
                                rule['name']]['status'] = False
                            self._ioc.message().debug(
                                "PROCESSING RULE [{0}] LOGICAL BLOCK [{1}] FAILED :: SOURCE FAILS RULE SET"
                                .format(str(rule['name']),
                                        str(detector)), True)
                            # del out the instance
                            del detector_instance
                            # route on
                            break
                    else:
                        # the detector was invalid or could not be found
                        self._ioc.message().error("Invalid Detector: [" +
                                                  str(detector) + "]",
                                                  hipchat=True)
                        del detector_instance
                        break
                # finally lets convert back to normal dict for the rule
                final_source_data[index]['rule_processing'][
                    rule['name']] = dict(final_source_data[index]
                                         ['rule_processing'][rule['name']])
            # now lets convert the rule_processing back to a normal array
            final_source_data[index]['rule_processing'] = dict(
                final_source_data[index]['rule_processing'])
            self._ioc.message().debug(
                "FINAL SOURCE RULE PROCESSING STRUCTURE: [{0}]".format(
                    str(final_source_data[index]['rule_processing'])), True)
            self._ioc.message().debug(
                "PROCESSING OBJECT [{0}] OF [{1}] COMPLETE".format(
                    str(index + 1), str(total_records)), True)
            # finally increment our pointer
            index += 1
        # return final for usage elsewhere
        return final_source_data

    def _schedule_scheduling(self, source_id, updated_source):
        # type: (int, dict) -> bool
        # first lets get applicable servers to run detectors
        # lets only get the least assigned server so we can round robin
        result = self._sql.get_session()\
            .query(JobServers)\
            .filter(JobServers.scheduler == True)\
            .filter(JobServers.active == True)\
            .order_by(JobServers.jobs_assigned)\
            .first()
        if not result:
            self._ioc.message().error(
                "Failed to find active scheduling server!::Dropping Scan",
                hipchat=True)
            return False
        else:
            server = result.id
            # Now lets update the sources for the determined server to work
            stmt = update(SourceData)\
                .where(SourceData.id == source_id)\
                .values(scheduling_server=server, source_data=updated_source)
            self._sql.get_session().execute(stmt)
            self._sql.get_session().commit()
            # finally lets ensure we account for the fact our server is going to do
            # that job and increment the assignment counter
            stmt = update(JobServers).where(JobServers.id == server).values(
                jobs_assigned=result.jobs_assigned + 1)
            self._sql.get_session().execute(stmt)
            self._sql.get_session().commit()
            self._ioc.message().debug(
                "DETECTION FOR SOURCE COMPLETE::SCHEDULED TO SERVER [{0}]".
                format(server), True)
            return True
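
The three helpers above implement a claim/complete/reverse lifecycle: a SourceData row is stamped with a start time when a node claims it, stamped complete on success, and nulled back out on failure so another pass can retry it. Below is a minimal, self-contained sketch of that pattern using SQLAlchemy Core against an in-memory SQLite database (assuming SQLAlchemy 1.x autocommit semantics, contemporary with this codebase); the table and column names are illustrative stand-ins, not the real GREASE schema.

from datetime import datetime

from sqlalchemy import (Boolean, Column, DateTime, Integer, MetaData, Table,
                        create_engine, update)

engine = create_engine('sqlite://')
metadata = MetaData()
source_data = Table(
    'source_data', metadata,
    Column('id', Integer, primary_key=True),
    Column('detection_start_time', DateTime, nullable=True),
    Column('detection_end_time', DateTime, nullable=True),
    Column('detection_complete', Boolean, nullable=True),
)
metadata.create_all(engine)

conn = engine.connect()
conn.execute(source_data.insert().values(id=1))
# claim: stamp a start time so other nodes skip this row
conn.execute(update(source_data)
             .where(source_data.c.id == 1)
             .values(detection_start_time=datetime.utcnow()))
# complete: stamp the end time and flip the completion flag
conn.execute(update(source_data)
             .where(source_data.c.id == 1)
             .values(detection_complete=True,
                     detection_end_time=datetime.utcnow()))
# reverse: null the detection columns so another pass can retry the row
conn.execute(update(source_data)
             .where(source_data.c.id == 1)
             .values(detection_start_time=None,
                     detection_end_time=None,
                     detection_complete=None))
conn.close()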
Example #4
class DaemonRouter(GreaseRouter.Router):
    """
    Daemon process routing for GREASE
    """
    __author__ = "James E. Bell Jr"
    __version__ = "1.0"

    _config = Configuration()
    _runs = 0
    _throttle_tick = 0
    _job_completed_queue = []
    _current_real_second = datetime.now().second
    _current_run_second = datetime.now().second
    _log = Logger()
    _process = None
    _importer = Importer(_log)
    _job_metadata = {'normal': 0, 'persistent': 0}
    _alchemyConnection = SQLAlchemyConnection(_config)
    _ioc = Grease()
    _ContextMgr = []

    def __init__(self):
        GreaseRouter.Router.__init__(self)
        if len(self._config.identity) <= 0:
            # ensure we won't run without proper registration
            print("ERR::Unregistered to Database!")
            self._log.error("Registration not found")
            sys.exit(1)

    @staticmethod
    def entry_point():
        """
        Application Entry point
        :return: void
        """
        router = DaemonRouter()
        router.gateway()

    def gateway(self):
        if len(self.args) >= 1:
            if self._config.op_name == 'nt':
                self.set_process(Daemon.WindowsService(sys.argv, self))
            else:
                self.set_process(Daemon.UnixDaemon(self))
            if self.args[0] == 'start':
                self._log.debug("Starting Daemon")
                self.get_process().start()
            elif self.args[0] == 'restart':
                self._log.debug("Restarting Daemon")
                self.get_process().restart()
            elif self.args[0] == 'stop':
                self._log.debug("Stopping Daemon")
                self.get_process().stop()
            else:
                self.bad_exit(
                    "Invalid Command To Daemon expected [start,stop,restart]",
                    2)
        else:
            self.bad_exit(
                "Command not given to daemon! Expected: [start,stop,restart]",
                1)

    def main(self):
        """
        Main Daemon Method
        :return: void
        """
        # Job Execution
        self._log.debug("PROCESS STARTUP", True)
        # initial rc value
        rc = "Garbage"
        # All programs are just loops
        if self._config.get('GREASE_EXECUTE_LINEAR'):
            self._log.debug("LINEAR EXECUTION MODE DETECTED")
        while True:
            # Windows Signal Catching
            if self._config.op_name == 'nt':
                if rc == win32event.WAIT_OBJECT_0:
                    self._log.debug(
                        "Windows Kill Signal Detected! Closing GREASE")
                    break
            # Continue Processing
            # Process Throttling
            if self._config.get('GREASE_THROTTLE'):
                if int(self.get_throttle_tick()) > int(
                        str(self._config.get('GREASE_THROTTLE'))):
                    # prevent more than 1000 loops per second by default
                    # check time
                    self.log_message_once_a_second("Throttle reached", -11)
                    self.have_we_moved_forward_in_time()
                    continue
            # Job Processing
            if self._config.get('GREASE_EXECUTE_LINEAR'):
                self.process_queue_standard()
            else:
                self.process_queue_threaded()
            # Final Clean Up
            self.inc_runs()
            self.inc_throttle_tick()
            self.have_we_moved_forward_in_time()
            # After all this check for new windows services
            if os.name == 'nt':
                # Block .5ms to listen for exit sig
                rc = win32event.WaitForSingleObject(
                    self.get_process().hWaitStop, 50)

    def log_message_once_a_second(self, message, queue_id):
        # type: (str, int) -> bool
        # have we moved forward since the last second
        if self.have_we_moved_forward_in_time():
            # if we have we can log since the log does not have a record for this second
            self._log.debug(message)
            # We also ensure we record we already logged for zero jobs to process this second
            self.add_job_to_completed_queue(queue_id)
            return True
        else:
            # we have not moved forward in time
            # have we logged for this second
            if not self.has_job_run(queue_id):
                # We have not logged for this second so lets do that now
                self._log.debug(message)
                # record that we logged for this second
                self.add_job_to_completed_queue(queue_id)
                return True
            else:
                return False

    def process_queue_standard(self):
        # type: () -> bool
        job_queue = self.get_assigned_jobs()
        if len(job_queue) == 0:
            self.log_message_once_a_second(
                "Total Jobs To Process: [0] Current Runs: [{0}]".format(
                    self.get_runs()), -1)
        else:
            # We have some jobs to process
            if self._job_metadata['normal'] > 0:
                # if we have any normal jobs lets log
                self._log.debug(
                    "Total Jobs To Process: [{0}] Current Runs: [{1}]".format(
                        self._job_metadata['normal'], self.get_runs()))
            else:
                # we only have persistent jobs to process
                self.log_message_once_a_second(
                    "Total Jobs To Process: [{0}] Current Runs: [{1}]".format(
                        len(job_queue), self.get_runs()), 0)
            # now lets loop through the job schedule
            for job in job_queue:
                # start class up
                command = self._importer.load(job['module'], job['command'])
                # ensure we got back the correct type
                if not command:
                    self._log.error(
                        "Failed To Load Command [{0}] of [{1}]".format(
                            job['command'], job['module']),
                        hipchat=True)
                    del command
                    continue
                if not isinstance(command, GreaseDaemonCommand):
                    self._log.error(
                        "Instance created was not of type GreaseDaemonCommand",
                        hipchat=True)
                    del command
                    continue
                if not job['persistent']:
                    # This is an on-demand job
                    # we just need to execute it
                    self.mark_job_in_progress(job['id'])
                    self._log.debug(
                        "Preparing to execute on-demand job [{0}]".format(
                            job['id']), True)
                    command.attempt_execution(job['id'], job['additional'])
                else:
                    # This is a persistent job
                    if self.has_job_run(job['id']):
                        # Job Already Executed
                        continue
                    else:
                        if job['tick'] == self.get_current_run_second():
                            self._log.debug(
                                "Preparing to execute persistent job [{0}]".
                                format(job['id']), True)
                            command.attempt_execution(job['id'],
                                                      job['additional'])
                            self.add_job_to_completed_queue(job['id'])
                        else:
                            # continue because we are not in the tick required
                            continue
                # Report Telemetry
                self._ioc.run_daemon_telemetry(command)
                if command.get_exe_state()['result']:
                    # job success
                    if job['persistent']:
                        self._log.debug(
                            "Persistent Job Successful [{0}]".format(
                                job['id']))
                    else:
                        self._log.debug(
                            "On-Demand Job Successful [{0}]".format(job['id']))
                        self.mark_job_complete(job['id'])
                else:
                    # job failed
                    if job['persistent']:
                        self._log.debug("Persistent Job Failed [{0}]".format(
                            job['id']))
                    else:
                        self._log.debug("On-Demand Job Failed [{0}]".format(
                            job['id']))
                        self.mark_job_failure(job['id'], job['failures'])
                command.__del__()
                del command
        return True

    def process_queue_threaded(self):
        # type: () -> bool
        self.thread_check()
        job_queue = self.get_assigned_jobs()
        if len(job_queue) == 0:
            # have we moved forward since the last second
            self.log_message_once_a_second(
                "Total Jobs To Process: [0] Current Runs: [{0}]".format(
                    self.get_runs()), -1)
        else:
            if len(self._ContextMgr) >= int(
                    self._config.get('GREASE_THREAD_MAX', '15')):
                self.log_message_once_a_second(
                    "Thread Maximum Reached::Current Runs: [{0}]".format(
                        self.get_runs()), -10)
                return True
            # We have some jobs to process
            if self._job_metadata['normal'] == 0:
                # we only have persistent jobs to process
                self.log_message_once_a_second(
                    "Total Jobs To Process: [{0}] Current Runs: [{1}]".format(
                        len(job_queue), self.get_runs()), 0)
            else:
                # normal jobs are present, log the real count each pass
                self._log.debug(
                    "Total Jobs To Process: [{0}] Current Runs: [{1}]".format(
                        self._job_metadata['normal'], self.get_runs()))
            # now lets loop through the job schedule
            for job in job_queue:
                # start class up
                command = self._importer.load(job['module'], job['command'])
                # ensure we got back the correct type
                if not command:
                    self._log.error(
                        "Failed To Load Command [{0}] of [{1}]".format(
                            job['command'], job['module']),
                        hipchat=True)
                    del command
                    continue
                if not isinstance(command, GreaseDaemonCommand):
                    self._log.error(
                        "Instance created was not of type GreaseDaemonCommand",
                        hipchat=True)
                    del command
                    continue
                if not job['persistent']:
                    # This is an on-demand job
                    # we just need to execute it
                    self._log.debug(
                        "Passing on-demand job [{0}] to thread manager".format(
                            job['id']), True)
                    self.thread_execute(command, job['id'], job['additional'],
                                        False, job['failures'])
                else:
                    # This is a persistent job
                    if self.has_job_run(job['id']):
                        # Job Already Executed
                        command.__del__()
                        del command
                        continue
                    else:
                        if job['tick'] == self.get_current_run_second():
                            self._log.debug(
                                "Passing persistent job [{0}] to thread manager"
                                .format(job['id']), True)
                            self.thread_execute(command, job['id'],
                                                job['additional'], True)
                            self.add_job_to_completed_queue(job['id'])
                        else:
                            # continue because we are not in the tick required
                            command.__del__()
                            del command
                            continue
        self.thread_check()
        return True

    def thread_execute(self, command, cid, additional, persistent, failures=0):
        # type: (GreaseDaemonCommand, int, dict, bool, int) -> None
        # first ensure the command ID isn't already running
        process_running = False
        for item in self._ContextMgr:
            if item[2] == cid:
                process_running = True
                break
        if process_running:
            # if it is return out
            self._log.debug(
                "Bailing on job [{0}], already executing".format(cid), True)
            return None
        # start process
        proc = threading.Thread(
            target=command.attempt_execution,
            args=(cid, additional),
            name="GREASE EXECUTION THREAD::CID [{0}]".format(cid))
        # set for background
        proc.daemon = True
        if persistent:
            self._log.debug(
                "Beginning persistent execution of job [{0}] on thread".format(
                    cid), True)
        else:
            self.mark_job_in_progress(cid)
            self._log.debug(
                "Beginning on-demand execution of job [{0}] on thread".format(
                    cid), True)
        # start
        proc.start()
        # add command to pool
        self._ContextMgr.append([command, proc, cid, persistent, failures])
        return None

    def thread_check(self):
        final = []
        # Check for thread completion else add back to list
        for command in self._ContextMgr:
            if command[1].isAlive():
                final.append(command)
            else:
                self._log.info("Job completed [{0}]".format(command[2]), True)
                self.record_telemetry(command[0], command[2], command[4],
                                      command[3])
        self._ContextMgr = final
        return

    def record_telemetry(self, command, cid, failures, persistent):
        # type: (GreaseDaemonCommand, int, int, bool) -> None
        # Report Telemetry
        command.set_exe_state('command_id', cid)
        self._ioc.run_daemon_telemetry(command)
        if command.get_exe_state()['result']:
            # job success
            if persistent:
                self._log.debug("Persistent Job Successful [{0}]".format(cid))
            else:
                self._log.debug("On-Demand Job Successful [{0}]".format(cid))
                self.mark_job_complete(cid)
        else:
            # job failed
            if persistent:
                self._log.debug("Persistent Job Failed [{0}]".format(cid))
            else:
                self._log.debug("On-Demand Job Failed [{0}]".format(cid))
                self.mark_job_failure(cid, failures)
        command.__del__()
        del command

    def mark_job_in_progress(self, job_id):
        """
        sets job as in progress
        :param job_id: int
        :return: bool
        """
        stmt = update(JobQueue).where(JobQueue.id == job_id).values(
            in_progress=True, completed=False)
        self._alchemyConnection.get_session().execute(stmt)
        self._alchemyConnection.get_session().commit()
        return True

    def mark_job_complete(self, job_id):
        """
        Complete a successful job
        :param job_id: int
        :return: bool
        """
        stmt = update(JobQueue).where(JobQueue.id == job_id).values(
            in_progress=False, completed=True, complete_time=datetime.now())
        self._alchemyConnection.get_session().execute(stmt)
        self._alchemyConnection.get_session().commit()
        return True

    def mark_job_failure(self, job_id, current_failures):
        """
        Fail a job
        :param job_id: int
        :param current_failures: int
        :return: bool
        """
        stmt = update(JobQueue)\
            .where(JobQueue.id == job_id)\
            .values(in_progress=False, completed=False, complete_time=None, failures=current_failures + 1)
        self._alchemyConnection.get_session().execute(stmt)
        self._alchemyConnection.get_session().commit()
        return True

    def get_assigned_jobs(self):
        """
        gets current job assignment
        :return: list
        """
        # type: () -> list
        # reset job queue metadata
        self._job_metadata['normal'] = 0
        self._job_metadata['persistent'] = 0
        # create final result
        final = []
        # first find normal jobs
        result = self._alchemyConnection\
            .get_session()\
            .query(JobQueue, JobConfig)\
            .filter(JobQueue.host_name == self._config.node_db_id())\
            .filter(JobQueue.job_id == JobConfig.id) \
            .filter(or_(and_(JobQueue.in_progress == False, JobQueue.completed == False), JobQueue.in_progress == True)) \
            .filter(JobQueue.failures < 6)\
            .all()
        if not result:
            # No Jobs Found
            self._job_metadata['normal'] = 0
        else:
            # Walk the job list
            for job in result:
                self._job_metadata['normal'] += 1
                final.append({
                    'id': job.JobQueue.id,
                    'module': job.JobConfig.command_module,
                    'command': job.JobConfig.command_name,
                    'request_time': datetime.utcnow(),
                    'additional': job.JobQueue.additional,
                    'tick': job.JobConfig.tick,
                    'persistent': False,
                    'failures': job.JobQueue.failures
                })
        # Now search for persistent jobs
        result = self._alchemyConnection\
            .get_session()\
            .query(PersistentJobs, JobConfig)\
            .filter(PersistentJobs.server_id == self._config.node_db_id())\
            .filter(PersistentJobs.command == JobConfig.id)\
            .filter(PersistentJobs.enabled == True)\
            .all()
        if not result:
            # No Jobs Found
            self._job_metadata['persistent'] = 0
        else:
            # Walk the job list
            for job in result:
                self._job_metadata['persistent'] += 1
                final.append({
                    'id': job.PersistentJobs.id,
                    'module': job.JobConfig.command_module,
                    'command': job.JobConfig.command_name,
                    'request_time': datetime.utcnow(),
                    'additional': job.PersistentJobs.additional,
                    'tick': job.JobConfig.tick,
                    'persistent': True,
                    'failures': 0
                })
        return final

    # Class Property getter/setters/methods

    # run counter
    def get_runs(self):
        """
        returns int of amount of loops
        :return: int
        """
        # type: () -> int
        return int(self._runs)

    def inc_runs(self):
        """
        increments run count
        :return: None
        """
        # type: () -> bool
        self._runs += 1
        return True

    def reset_runs(self):
        """
        resets the run counter to 0
        :return: bool
        """
        # type: () -> bool
        self._runs = 0
        return True

    # Job Completed Queue
    def add_job_to_completed_queue(self, job_id):
        """
        Adds Job to queue so we don't run the job again
        :param job_id: int
        :return: bool
        """
        # type: (int) -> bool
        if int(job_id) not in self._job_completed_queue:
            self._log.debug("Job Executed This Second [{0}]".format(job_id),
                            True)
            self._job_completed_queue.append(int(job_id))
            return True
        else:
            return False

    def has_job_run(self, job_id):
        """
        Determines if the job ID has run during the current cycle
        :param job_id: int
        :return: bool
        """
        # type: (int) -> bool
        if int(job_id) in self._job_completed_queue:
            return True
        else:
            return False

    def reset_completed_job_queue(self):
        """
        clears job run queue
        :return: bool
        """
        # type: () -> bool
        self._log.debug("Completed Per-Second Queue Cleared", True)
        self._job_completed_queue = []
        return True

    # throttle tick
    def get_throttle_tick(self):
        """
        returns how many runs in this second
        :return: int
        """
        # type: () -> int
        return int(self._throttle_tick)

    def inc_throttle_tick(self):
        """
        increases throttle tick by 1
        :return: bool
        """
        # type: () -> bool
        self._throttle_tick += 1
        return True

    def reset_throttle_tick(self):
        """
        resets throttle tick to 0
        :return: bool
        """
        # type: () -> bool
        self._throttle_tick = 0
        return True

    # Process Controller
    def set_process(self, process):
        """
        sets the process handler
        :param process: Daemon
        :return: None
        """
        self._process = process
        return None

    def get_process(self):
        """
        Returns _process property
        :return: Daemon/None
        """
        return self._process

    # time operators
    @staticmethod
    def get_current_real_second():
        """
        Gets current second
        :return: int
        """
        # type: () -> int
        return datetime.now().second

    def get_current_run_second(self):
        """
        Gets the current observed second
        :return: int
        """
        # type: () -> int
        return int(self._current_run_second)

    def set_current_run_second(self, sec):
        """
        Sets current observed second
        :param sec: int
        :return: None
        """
        # type: (int) -> None
        self._current_run_second = int(sec)

    def have_we_moved_forward_in_time(self):
        """
        Answers the question "have we moved forward in time?"
        If so, resets the per-second counters and returns True, else False
        :return: bool
        """
        # type: () -> bool
        if self.get_current_run_second() == self.get_current_real_second():
            return False
        else:
            self._log.debug("Time has moved forward! Restoring Context", True)
            self.set_current_run_second(self.get_current_real_second())
            self.reset_completed_job_queue()
            self.reset_throttle_tick()
            return True
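
DaemonRouter's loop depends on the per-second bookkeeping at the end of the class: the throttle tick and the completed-job queue reset only when the wall clock actually advances, which is also what lets log_message_once_a_second emit at most one copy of a message per second. A compact sketch of that gating follows; SecondGate and its members are hypothetical names, not part of GREASE.

from datetime import datetime


class SecondGate(object):
    def __init__(self):
        self._run_second = datetime.now().second
        self._logged = set()

    def moved_forward(self):
        # reset per-second state once the real clock ticks over
        now = datetime.now().second
        if now == self._run_second:
            return False
        self._run_second = now
        self._logged.clear()
        return True

    def log_once(self, message, queue_id):
        # emit at most one copy of a message per wall-clock second
        if self.moved_forward() or queue_id not in self._logged:
            print(message)
            self._logged.add(queue_id)
            return True
        return False


gate = SecondGate()
for _ in range(3):
    gate.log_once("Throttle reached", -11)  # prints once this second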
Example #5
class ScanOnConfig(GreaseDaemonCommand):
    def __init__(self):
        super(ScanOnConfig, self).__init__()
        self._config = Configuration()
        self._sql = SQLAlchemyConnection(self._config)
        self._scanner_config = DetectorConfiguration.ConfigurationLoader()
        self._context = '{}'
        self._source_data = {}
        self._importer = Importer(self._ioc.message())
        self._duplification_filter = SourceDeDuplify(self._ioc.message())

    def __del__(self):
        super(ScanOnConfig, self).__del__()
        self._sql.get_session().close()

    def get_source_data(self):
        # type: () -> dict
        return self._source_data

    def set_source_data(self, data):
        # type: (dict) -> None
        self._source_data = data

    def execute(self, context='{}'):
        # engage scanning
        self.scan()
        # clear up this
        del self._duplification_filter
        return True

    def scan(self):
        # engage scanners scotty
        for scanner in self._scanner_config.get_scanners():
            self._ioc.message().debug(
                "STARTING SCANNER [{0}]".format(str(scanner)), True)
            # Ensure if we are focused only to process our source
            if os.getenv('GREASE_SOURCE_FOCUS'):
                # we have one, ensure its on the list of configured scanners
                if str(os.getenv('GREASE_SOURCE_FOCUS')) != str(scanner):
                    # It does not match, continue
                    self._ioc.message().info(
                        "Scanner skipped because not focus: [" + str(scanner) +
                        "] searching for [" +
                        str(os.getenv('GREASE_SOURCE_FOCUS')) + "]")
                    continue
            # lets loop through our scanners/detectors to execute the parsing
            # try to load the scanner we want
            parser = self._importer.load(os.getenv('GREASE_SOURCES_PKG', ''),
                                         scanner, True)
            # type check that bad boy to ensure sanity
            if isinstance(parser, BaseSource):
                # if we got back a valid source lets parse that sucker
                self._ioc.message().debug(
                    "PARSING SOURCE [{0}]".format(str(scanner)), True)
                parser.parse_source(
                    self._scanner_config.get_scanner_config(scanner))
                # lets get the results of the parse
                # here we run our de-duplication logic
                self._ioc.message().debug(
                    "PASSING RESULT TO DEDUPLICATION ENGINE [{0}]".format(
                        str(scanner)), True)
                source = self._duplification_filter.create_unique_source(
                    scanner, parser.duplicate_check_fields(),
                    list(parser.get_records()))
                self._ioc.message().debug(
                    "ATTEMPTING DETECTION SCHEDULING [{0}]".format(
                        str(scanner)), True)
                if self._schedule_detection(source, scanner):
                    self._ioc.message().info(
                        "Detector job scheduled from scanner: [" +
                        str(scanner) + "]")
                else:
                    self._ioc.message().error(
                        "Failed to schedule source detection for [" +
                        str(scanner) + "]",
                        hipchat=True)
                del parser
            else:
                # something went haywire; check the scanner configuration
                self._ioc.message().error(
                    "Invalid Scanner In Configurations: [" + str(scanner) +
                    "]",
                    hipchat=True)

    def _schedule_detection(self, sources, scanner):
        # type: (dict, str)  -> bool
        # first lets get applicable servers to run detectors
        # lets only get the least assigned server so we can round robin
        result = self._sql.get_session()\
            .query(JobServers)\
            .filter(JobServers.detector == True)\
            .filter(JobServers.active == True)\
            .order_by(JobServers.jobs_assigned)\
            .first()
        if not result:
            self._ioc.message().error(
                "Failed to find detection server! dropping scan!",
                hipchat=True)
            return False
        else:
            server = result.id
        # Now lets insert the sources for the determined server to work
        source = SourceData(source_data=sources,
                            source_server=self._config.node_db_id(),
                            detection_server=server,
                            scanner=scanner,
                            created_time=datetime.utcnow())
        self._sql.get_session().add(source)
        self._sql.get_session().commit()
        # finally lets ensure we account for the fact our server is going to do
        # that job and increment the assignment counter
        stmt = update(JobServers).where(JobServers.id == server).values(
            jobs_assigned=result.jobs_assigned + 1)
        self._sql.get_session().execute(stmt)
        self._sql.get_session().commit()
        self._ioc.message().debug(
            "SOURCE SCHEDULED FOR DETECTION [{0}] TO SERVER [{1}]".format(
                str(scanner), str(server)), True)
        return True
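
_schedule_detection here (and _schedule_scheduling in the Detector) picks a target by ordering active servers on jobs_assigned, taking the first row, then incrementing that server's counter, so repeated assignments drift toward a round robin. The same idea in plain Python, with dicts standing in for JobServers rows; all names below are illustrative.

servers = [
    {'id': 1, 'active': True, 'detector': True, 'jobs_assigned': 4},
    {'id': 2, 'active': True, 'detector': True, 'jobs_assigned': 2},
    {'id': 3, 'active': False, 'detector': True, 'jobs_assigned': 0},
]


def pick_detection_server(pool):
    # consider only active detection servers, like the query's filters
    candidates = [s for s in pool if s['active'] and s['detector']]
    if not candidates:
        return None  # mirrors the "drop the scan" error path above
    # order by jobs_assigned and take the first, i.e. the least loaded
    choice = min(candidates, key=lambda s: s['jobs_assigned'])
    # account for the job we are about to hand it, as the UPDATE above does
    choice['jobs_assigned'] += 1
    return choice['id']


assert pick_detection_server(servers) == 2  # least loaded wins
assert pick_detection_server(servers) == 2  # still lightest at 3 vs 4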
Example #6
class LaunchCtl(GreaseDaemonCommand):

    _config = Configuration()
    _sql = SQLAlchemyConnection(_config)

    def __init__(self):
        super(LaunchCtl, self).__init__()
        self.purpose = "Register machine with Job Control Database"

    def __del__(self):
        super(LaunchCtl, self).__del__()
        self._sql.get_session().close()

    def execute(self, context='{}'):
        if len(sys.argv) >= 4:
            action = str(sys.argv[3])
        else:
            action = ''
        if action == 'register':
            return bool(self._action_register())
        elif action == 'kill-server':
            return bool(self._action_cull_server())
        elif action == 'revive-server':
            return bool(self._action_restore_server())
        elif action == 'list-pjobs':
            return bool(self._action_list_persistent_jobs())
        elif action == 'list-jobs':
            return bool(self._action_list_job_schedule())
        elif action == 'assign-task':
            return bool(self._action_assign_task())
        elif action == 'remove-task':
            return bool(self._action_remove_task())
        elif action == 'enable-detection':
            return bool(self._action_enable_detection())
        elif action == 'enable-scheduling':
            return bool(self._action_enable_scheduling())
        elif action == 'disable-detection':
            return bool(self._action_disable_detection())
        elif action == 'disable-scheduling':
            return bool(self._action_disable_scheduling())
        elif action == 'create-job':
            return bool(self._action_create_job())
        elif action == 'load-db':
            return bool(self._action_load_db())
        else:
            print("ERR: Invalid Command Expected: ")
            print("\tregister")
            print("\tkill-server")
            print("\trevive-server")
            print("\tlist-pjobs")
            print("\tlist-jobs")
            print("\tassign-task")
            print("\tremove-task")
            print("\tenable-detection")
            print("\tenable-scheduling")
            print("\tdisable-detection")
            print("\tdisable-scheduling")
            print("\tcreate-job")
            print("\tload-db")
            return True

    def _action_register(self):
        # type: () -> bool
        if os.path.isfile(self._config.identity_file):
            self._ioc.message().warning("Machine Already Registered With Grease Job Control")
            print("Machine Already Registered With Grease Job Control")
            return True
        else:
            # we need to register
            # first lets generate a new UUID
            uid = uuid.uuid4()
            # lets see if we have been provided an execution env
            if len(sys.argv) >= 5:
                exe_env = str(sys.argv[4])
            else:
                exe_env = 'general'
            # next lets register with the job control database
            server = JobServers(
                host_name=str(uid),
                execution_environment=exe_env,
                active=True,
                activation_time=datetime.utcnow()
            )
            self._sql.get_session().add(server)
            self._sql.get_session().commit()
            with open(self._config.identity_file, 'w') as identity_file:
                identity_file.write(str(uid))
            return True

    def _action_cull_server(self):
        # type: () -> bool
        if len(sys.argv) >= 5:
            server = str(sys.argv[4])
        else:
            if os.path.isfile(self._config.identity_file):
                server = self._config.identity
            else:
                print("Server has no registration record locally")
                return True
        # get the server ID
        result = self._sql.get_session().query(JobServers)\
            .filter(JobServers.host_name == server)\
            .first()
        if result:
            server_id = result.id
            instance = Section31()
            instance._declare_doctor(server_id)
            instance._cull_server(server_id)
            return True
        else:
            print("Job Server Not In Registry")
            return True

    def _action_restore_server(self):
        # type: () -> bool
        if len(sys.argv) >= 5:
            server = str(sys.argv[4])
        else:
            if os.path.isfile(self._config.identity_file):
                server = self._config.identity
            else:
                print("Server has no registration record locally")
                return True
        # get the server ID
        result = self._sql.get_session().query(JobServers)\
            .filter(JobServers.host_name == server)\
            .first()
        if result:
            server_id = result.id
        else:
            print("Job Server Not In Registry")
            return True
        # clear the doctor from the server health table
        stmt = update(ServerHealth)\
            .where(ServerHealth.server == server_id)\
            .values(doctor=None)
        self._sql.get_session().execute(stmt)
        self._sql.get_session().commit()
        # next reactivate it
        stmt = update(JobServers)\
            .where(JobServers.id == server_id)\
            .values(active=True, activation_time=datetime.utcnow())
        self._sql.get_session().execute(stmt)
        self._sql.get_session().commit()
        return True

    def _action_list_persistent_jobs(self):
        # type: () -> bool
        result = self._sql.get_session().query(PersistentJobs, JobConfig)\
            .filter(PersistentJobs.command == JobConfig.id)\
            .filter(PersistentJobs.enabled == True)\
            .filter(PersistentJobs.server_id == self._config.node_db_id())\
            .all()
        if not result:
            print("No Scheduled Jobs on this node")
        else:
            for job in result:
                print(
                    "\tPackage: [{0}] Job: [{1}] Tick: [{2}] Additional: [{3}]".format(
                        job.JobConfig.command_module,
                        job.JobConfig.command_name,
                        job.JobConfig.tick,
                        job.PersistentJobs.additional
                    )
                )
        return True

    def _action_list_job_schedule(self):
        # type: () -> bool
        result = self._sql.get_session().query(JobQueue, JobConfig)\
            .filter(JobQueue.job_id == JobConfig.id)\
            .filter(JobQueue.completed == False)\
            .filter(JobQueue.in_progress == False)\
            .filter(JobQueue.host_name == self._config.node_db_id())\
            .all()
        if not result:
            print("No jobs scheduled on this node")
            return True
        print("Jobs in Queue:")
        for job in result:
            print("\t Module: [{0}] Command: [{1}] Additional: [{2}]".format(
                job.JobConfig.command_module,
                job.JobConfig.command_name,
                job.JobQueue.additional
            ))
        return True

    def _action_assign_task(self):
        # type: () -> bool
        if len(sys.argv) >= 5:
            new_task = str(sys.argv[4])
        else:
            print("Please provide a command to schedule to node")
            return True
        result = self._sql.get_session().query(JobConfig)\
            .filter(JobConfig.command_name == new_task)\
            .first()
        if not result:
            print("Command not found! Available Commands:")
            result = self._sql.get_session().query(JobConfig).all()
            if not result:
                print("NO JOBS CONFIGURED IN DB")
            else:
                for job in result:
                    print("{0}".format(job.command_name))
            return True
        else:
            pJob = PersistentJobs(
                server_id=self._config.node_db_id(),
                command=result.id,
                additional={},
                enabled=True
            )
            self._sql.get_session().add(pJob)
            self._sql.get_session().commit()
            print("TASK ASSIGNED")
            return True

    def _action_remove_task(self):
        # type: () -> bool
        if os.path.isfile(self._config.identity_file):
            server = self._config.node_db_id()
        else:
            print("Server has no registration record locally")
            return True
        if len(sys.argv) >= 5:
            new_task = str(sys.argv[4])
            result = self._sql.get_session().query(JobConfig).filter(JobConfig.command_name == new_task).first()
            if not result:
                print("Failed to find job in configuration tables")
                return True
            else:
                stmt = update(PersistentJobs)\
                    .where(and_(PersistentJobs.server_id == server, PersistentJobs.command == result.id))\
                    .values(enabled=False)
                self._sql.get_session().execute(stmt)
                self._sql.get_session().commit()
                print("TASK UNASSIGNED")
                return True

    def _action_enable_detection(self):
        # type: () -> bool
        if os.path.isfile(self._config.identity_file):
            server = self._config.identity
            stmt = update(JobServers).where(JobServers.host_name == server).values(detector=True)
            self._sql.get_session().execute(stmt)
            self._sql.get_session().commit()
            print("DETECTION ENABLED")
            return True
        else:
            print("ERR: SERVER UNREGISTERED")
            return False

    def _action_enable_scheduling(self):
        # type: () -> bool
        if os.path.isfile(self._config.identity_file):
            server = self._config.identity
            stmt = update(JobServers).where(JobServers.host_name == server).values(scheduler=True)
            self._sql.get_session().execute(stmt)
            self._sql.get_session().commit()
            print("SCHEDULING ENABLED")
            return True
        else:
            print("ERR: SERVER UNREGISTERED")
            return False

    def _action_disable_detection(self):
        # type: () -> bool
        if os.path.isfile(self._config.identity_file):
            server = self._config.identity
            stmt = update(JobServers).where(JobServers.host_name == server).values(detector=False)
            self._sql.get_session().execute(stmt)
            self._sql.get_session().commit()
            print("DETECTION DISABLED")
            return True
        else:
            print("ERR: SERVER UNREGISTERED")
            return False

    def _action_disable_scheduling(self):
        # type: () -> bool
        if os.path.isfile(self._config.identity_file):
            server = self._config.identity
            stmt = update(JobServers).where(JobServers.host_name == server).values(scheduler=False)
            self._sql.get_session().execute(stmt)
            self._sql.get_session().commit()
            print("SCHEDULING DISABLED")
            return True
        else:
            print("ERR: SERVER UNREGISTERED")
            return False

    def _action_create_job(self):
        # type: () -> bool
        if len(sys.argv) >= 6:
            new_task_module = str(sys.argv[4])
            new_task_class = str(sys.argv[5])
            # ensure a job doesn't exist
            result = self._sql.get_session().query(JobConfig)\
                .filter(JobConfig.command_module == new_task_module)\
                .filter(JobConfig.command_name == new_task_class)\
                .all()
            if result:
                print("Job already exists")
                return True
            NewJob = JobConfig(
                command_module=new_task_module,
                command_name=new_task_class
            )
            self._sql.get_session().add(NewJob)
            self._sql.get_session().commit()
            print("Job Created")
            return True
        else:
            print("ERR: INVALID SUBCOMMAND::MUST PASS MODULE AND CLASS NAME")
            return False

    def _action_load_db(self):
        print("LOADING DB")
        RDBMSTypes.__main__()
        if not os.path.isfile(self._config.identity_file):
            self._action_register()
        return True
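
LaunchCtl.execute routes on sys.argv[3] through a long if/elif chain. The same dispatch can be written as a table, which keeps the help text and the handler set in one place; a sketch under that assumption follows, with placeholder handlers rather than the real actions.

def register():
    print("TASK: register")  # placeholder body
    return True


def kill_server():
    print("TASK: kill-server")  # placeholder body
    return True


ACTIONS = {'register': register, 'kill-server': kill_server}


def dispatch(argv):
    # same arity check as LaunchCtl.execute: subcommand lives at argv[3]
    action = str(argv[3]) if len(argv) >= 4 else ''
    handler = ACTIONS.get(action)
    if handler is None:
        print("ERR: Invalid Command Expected: ")
        for name in sorted(ACTIONS):
            print("\t" + name)
        return True
    return bool(handler())


dispatch(['grease', 'launchctl', 'daemon', 'register'])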
Example #7
def __init__(self):
    super(Scheduler, self).__init__()
    self._scanner_config = DetectorConfiguration.ConfigurationLoader()
    self._config = Configuration()
    self._sql = SQLAlchemyConnection(self._config)
Example #8
class Scheduler(GreaseDaemonCommand):
    def __init__(self):
        super(Scheduler, self).__init__()
        self._scanner_config = DetectorConfiguration.ConfigurationLoader()
        self._config = Configuration()
        self._sql = SQLAlchemyConnection(self._config)

    def __del__(self):
        super(Scheduler, self).__del__()
        self._sql.get_session().close()

    def execute(self, context='{}'):
        # Lets go get the jobs needing to be scheduled by this server
        result = self._sql.get_session().query(SourceData, JobServers)\
            .filter(SourceData.scheduling_server == JobServers.id)\
            .filter(JobServers.id == self._config.node_db_id())\
            .filter(SourceData.detection_start_time != None)\
            .filter(SourceData.detection_end_time != None)\
            .filter(SourceData.detection_complete == True)\
            .filter(SourceData.scheduling_start_time == None)\
            .filter(SourceData.scheduling_end_time == None)\
            .filter(SourceData.scheduling_complete == False)\
            .limit(15)\
            .all()
        if not result:
            self._ioc.message().debug("No Sources to schedule", True)
            return True
        else:
            for schedule in result:
                # lets own this
                self._take_ownership(schedule.SourceData.id)
                self._schedule_via_sources(schedule.SourceData.source_data)
                # lets finish out
                self._complete(schedule.SourceData.id)
        return True

    def _schedule_via_sources(self, sources):
        # type: (dict) -> None
        for index in sources:
            # check to see if we already assigned this source
            if 'grease_internal_assigned' in sources[index]:
                if sources[index]['grease_internal_assigned']:
                    # we already successfully did the thing
                    self._ioc.message().debug(
                        "SCHEDULING ALREADY PROCEEDED ON INDEX [{0}]".format(
                            str(index)), True)
                    continue
                else:
                    # it failed previously
                    self._ioc.message().warning(
                        "Record Failed To Be Assigned::Loop Over")
                    continue
            else:
                # we have not attempted to schedule this record yet
                if len(sources[index]['rule_processing']) > 0:
                    # rules got processed on this source
                    for rule_name, rule_results in sources[index][
                            'rule_processing'].iteritems():
                        self._ioc.message().debug(
                            "PROCESSING SOURCE [{0}] FOR RULE [{1}]".format(
                                str(index), str(rule_name)), True)
                        if rule_results['status']:
                            self._ioc.message().debug(
                                "SOURCE [{0}] RULE [{1}] PASSED DETECTION".
                                format(str(index), str(rule_name)), True)
                            # rule was successful time to schedule
                            # lets go load the rule config
                            rule_config = self._scanner_config.get_config(
                                rule_name)
                            self._ioc.message().debug(
                                "READING CONFIG FOR RULE [{0}]".format(
                                    str(rule_name)), True)
                            # setup the execution environment variable
                            if 'exe_env' in rule_config:
                                if len(rule_config['exe_env']) > 0:
                                    # if we have a valid string then just set it
                                    exe_env = rule_config['exe_env']
                                else:
                                    # else they left it blank, default to general
                                    exe_env = 'general'
                            else:
                                # they didn't provide one so default to general
                                exe_env = 'general'
                            # next lets get the ID
                            if 'incident_number' in sources[index]:
                                # set the incident number
                                i_num = sources[index]['incident_number']
                            elif 'number' in sources[index]:
                                # set the event number
                                i_num = sources[index]['number']
                            else:
                                # default to a sha256 hash of the values list
                                # to ensure a unique ID
                                i_num = hashlib.sha256(
                                    json.dumps(
                                        sources[index].values())).hexdigest()
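                                # e.g. a source lacking both 'incident_number'
                                # and 'number' gets a stable 64-character hex
                                # digest of its values as its ticket ID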
                            # Now lets set the job additional parameters
                            additional = dict()
                            if 'parameters' in sources[index][
                                    'rule_processing'][rule_name]:
                                if isinstance(
                                        sources[index]['rule_processing']
                                    [rule_name]['parameters'], dict):
                                    additional = sources[index][
                                        'rule_processing'][rule_name][
                                            'parameters']
                                else:
                                    self._ioc.message().warning(
                                        "Parameters were not a dictionary for rule: ["
                                        + str(rule_name) + "]")
                            # Now lets set up the ticket info
                            additional['ticket'] = i_num
                            self._ioc.message().debug(
                                "PREPARING TO SCHEDULE JOB [{0}] FOR EXECUTION::"
                                "EXE_ENV: [{1}] ADDITIONAL: [{2}] ticket: [{3}]"
                                .format(str(rule_config['job']), str(exe_env),
                                        str(additional), str(i_num)), True)
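                            # the target package comes from node configuration,
                            # falling back to 'localhost_generic' when unset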
                            if self._assign(
                                    rule_config['job'], exe_env,
                                    str(
                                        self._config.get(
                                            'SCHEDULE_PKG',
                                            'localhost_generic')), str(i_num),
                                    additional):
                                # we successfully assigned the ticket
                                self._ioc.message().debug(
                                    "JOB EXECUTION SCHEDULING SUCCESSFUL [{0}] FOR RULE [{1}]"
                                    .format(str(index), str(rule_name)), True)
                                sources[index][
                                    'grease_internal_assigned'] = True
                                continue
                            else:
                                # we failed to assign the ticket
                                self._ioc.message().warning(
                                    "JOB EXECUTION SCHEDULING FAILED [{0}] FOR RULE [{1}]"
                                    .format(str(index), str(rule_name)), True)
                                sources[index][
                                    'grease_internal_assigned'] = False
                                continue
                        else:
                            # rule failed detection, nothing to schedule here
                            self._ioc.message().debug(
                                "RULE FAILED FOR SOURCE [{0}] RULE [{1}]".
                                format(str(index), str(rule_name)), True)
                            continue
                else:
                    # No rules were processed on this source
                    self._ioc.message().debug(
                        "RULE PROCESSING WAS EMPTY FOR SOURCE [{0}]".format(
                            str(index)), True)
                    continue

    def _assign(self, job, exec_env, package, ticket, additional=None):
        # type: (str, str, str, str, dict) -> bool
        # use None as the default and create a fresh dict per call to
        # avoid sharing mutable state between invocations
        if additional is None:
            additional = {}
        # check to ensure this ticket isn't already on the schedule
        self._ioc.message().debug(
            "Job Scheduling Starting::Job [{0}] Exec_Env [{1}] Package [{2}] Ticket [{3}] Additional [{4}]"
            .format(str(job), str(exec_env), str(package), str(ticket),
                    str(additional)), True)
        if len(ticket) > 0:
            self._ioc.message().debug(
                "Validating ticket ID not already in Job Queue", True)
            result = self._sql.get_session().query(JobQueue)\
                .filter(JobQueue.ticket == ticket)\
                .filter(or_(and_(JobQueue.in_progress == False, JobQueue.completed == False), JobQueue.in_progress == True))\
                .all()
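            # a ticket counts as active when it is waiting (not in progress
            # and not completed) or currently in progress; completed tickets
            # may be scheduled again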
            if result:
                self._ioc.message().warning(
                    "Ticket Already in Job Queue for Execution")
                return False
        # lets get the least-assigned server so we can round robin
        self._ioc.message().debug("Searching for Execution Server", True)
        result = self._sql.get_session()\
            .query(JobServers)\
            .filter(JobServers.execution_environment == exec_env)\
            .filter(JobServers.active == True)\
            .order_by(JobServers.jobs_assigned)\
            .first()
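        # ascending order on jobs_assigned means first() returns the
        # least-loaded active server, spreading work across the pool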
        if not result:
            self._ioc.message().error(
                "No Execution Environments Found For Job: [" + job + "]",
                hipchat=True)
            return False
        server_info = result.id
        server_job_count = int(result.jobs_assigned)
        self._ioc.message().debug(
            "Job Server Selected [{0}] current assignment total [{1}]".format(
                server_info, server_job_count), True)
        self._ioc.message().debug("Searching for Job Configuration", True)
        result = self._sql.get_session().query(JobConfig)\
            .filter(JobConfig.command_module == package)\
            .filter(JobConfig.command_name == job)\
            .first()
        if not result:
            self._ioc.message().error(
                "No Jobs Configured For Requested Job: [" + job +
                "] for package: [" + package + "]",
                hipchat=True)
            return False
        job_id = result.id
        # Proceed to schedule
        self._ioc.message().debug(
            "Creating new job in queue for job [{0}] on node [{1}]".format(
                job_id, server_info), True)
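        # queue the job on the selected server; `additional` already carries
        # the rule parameters plus the ticket number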
        job_to_queue = JobQueue(host_name=server_info,
                                job_id=job_id,
                                ticket=ticket,
                                additional=additional)
        self._sql.get_session().add(job_to_queue)
        self._sql.get_session().commit()
        # now lets update that server and increment its assignment counter
        self._ioc.message().debug(
            "incrementing jobs assigned on server [{0}]".format(server_info),
            True)
        self._sql.get_session().query(JobServers).filter_by(
            id=server_info).update({'jobs_assigned': server_job_count + 1})
        self._sql.get_session().commit()
        self._ioc.message().debug(
            "JOB [{0}] SCHEDULED FOR SERVER [{1}]".format(
                str(job_id), str(server_info)), True)
        return True

    def _take_ownership(self, source_file_id):
        # type: (int) -> None
        stmt = update(SourceData)\
            .where(SourceData.id == source_file_id)\
            .values(scheduling_start_time=datetime.utcnow())
        self._sql.get_session().execute(stmt)
        self._sql.get_session().commit()
        self._ioc.message().debug(
            "TAKING OWNERSHIP OF SOURCE [{0}]".format(str(source_file_id)),
            True)

    def _complete(self, source_file_id):
        # type: (int) -> None
        stmt = update(SourceData)\
            .where(SourceData.id == source_file_id)\
            .values(scheduling_complete=True, scheduling_end_time=datetime.utcnow())
        self._sql.get_session().execute(stmt)
        self._sql.get_session().commit()
        self._ioc.message().debug(
            "COMPLETING SOURCE [{0}]".format(str(source_file_id)), True)