def run(self):
    '''Main process logic'''
    logging.debug("Worker (%s): starting up", self.identity)
    from laikaboss import config
    from laikaboss.dispatch import close_modules
    from laikaboss.util import init_logging
    logging.debug("using config %s", self.config_location)
    config.init(path=self.config_location)
    init_logging()
    log_debug("Worker %s started at %s" % (self.identity, time.time()))

    self.keep_running = True
    perform_grace_check = False

    # Add intercept for graceful shutdown
    signal.signal(signal.SIGTERM, functools.partial(shutdown_handler, self))
    signal.signal(signal.SIGINT, functools.partial(shutdown_handler, self))

    # Connect to broker
    logging.debug("Worker (%s): connecting broker", self.identity)
    context = zmq.Context(1)
    self.broker = context.socket(zmq.DEALER)
    self.broker.setsockopt(zmq.IDENTITY, self.identity)
    self.broker.connect(self.broker_address)
    self.broker_poller.register(self.broker, zmq.POLLIN)

    # Ask for work
    # request should be in one of the following formats
    #   ['', status]
    # where:
    #   status -- One of our defined status constants, determines
    #             how we handle this request
    self.broker.send_multipart(['', LRU_READY])

    # Indicators for worker expiration
    counter = 0
    start_time = time.time() + randint(1, 60)

    while self.keep_running:
        try:
            result = self.perform_scan(self.poll_timeout)
            if result:
                counter += 1

            should_quit = (
                counter >= self.max_scan_items or
                ((time.time() - start_time) / 60) >= self.ttl or
                not self.keep_running)

            # Determine next status
            status = LRU_QUIT
            if result:
                if should_quit:
                    status = LRU_RESULT_QUIT
                else:
                    status = LRU_RESULT_READY
            else:
                if should_quit:
                    status = LRU_QUIT
                    perform_grace_check = True
                else:
                    status = LRU_READY

            # Build reply
            if result:
                reply = ['', status, ''] + result
            else:
                reply = ['', status]

            # reply should be in one of the following formats
            #   ['', status]
            #   ['', status, '', client_id, '', reply]
            # where:
            #   status -- One of our defined status constants,
            #             determines how we handle this request
            #   client_id -- ZMQ identifier of the client socket
            #   reply -- The content of the reply
            #logging.debug("Worker: sending request %s", str(reply))
            tracker = self.broker.send_multipart(reply, copy=False, track=True)
            while not tracker.done and result:
                time.sleep(0.1)

            if should_quit:
                self.keep_running = False
        except zmq.ZMQError as zmqerror:
            if "Interrupted system call" not in str(zmqerror):
                logging.exception("Worker (%s): Received ZMQError", self.identity)
            else:
                logging.debug("Worker (%s): ZMQ interrupted by shutdown signal", self.identity)
        except QuitScanException:
            logging.debug("Worker (%s): Caught scan termination exception", self.identity)
            break

    # Begin graceful shutdown
    logging.debug("Worker (%s): beginning graceful shutdown sequence", self.identity)
    if perform_grace_check:
        logging.debug("Worker (%s): performing grace check", self.identity)
        try:
            result = self.perform_scan(self.poll_timeout)
            if result:
                reply = ['', LRU_RESULT_QUIT, ''] + result
                # reply should be in the following format
                #   ['', status, '', client_id, '', reply]
                # where:
                #   status -- One of our defined status constants,
                #             determines how we handle this request
                #   client_id -- ZMQ identifier of the client socket
                #   reply -- The content of the reply
                tracker = self.broker.send_multipart(reply, copy=False, track=True)
                while not tracker.done:
                    time.sleep(0.1)
        except zmq.ZMQError as zmqerror:
            if "Interrupted system call" not in str(zmqerror):
                logging.exception("Worker (%s): Received ZMQError", self.identity)
            else:
                logging.debug("Worker (%s): ZMQ interrupted by shutdown signal", self.identity)
    try:
        with timeout(self.shutdown_grace_timeout, exception=QuitScanException):
            close_modules()
    except QuitScanException:
        logging.debug("Worker (%s): Caught scan termination exception during destruction", self.identity)
    log_debug("Worker %s dying after %i objects and %i seconds" % (
        self.identity, counter, time.time() - start_time))
    logging.debug("Worker (%s): finished", self.identity)
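# ---------------------------------------------------------------------------
# Illustrative sketch (not part of the original source): the run() above talks
# to its broker over a DEALER socket using the multipart envelopes documented
# in the comments (['', status] and ['', status, '', client_id, '', reply]).
# Assuming the broker fronts workers with a ROUTER socket (the bind address
# below is hypothetical), this is how those frames look on the broker side
# once ZMQ prepends the worker's identity frame.
def _example_broker_read(bind_address="tcp://*:5560"):
    import zmq

    context = zmq.Context(1)
    backend = context.socket(zmq.ROUTER)    # workers connect here as DEALERs
    backend.bind(bind_address)

    frames = backend.recv_multipart()
    worker_id = frames[0]       # identity frame added by the ROUTER socket
    status = frames[2]          # LRU_READY / LRU_RESULT_READY / LRU_QUIT / ...
    if len(frames) > 3:
        client_id = frames[4]   # client the scan reply should be routed to
        reply = frames[6:]      # scan result frames produced by the worker
        return worker_id, status, client_id, reply
    return worker_id, status, None, None
# ---------------------------------------------------------------------------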
def run(self):
    global CONFIG_PATH
    config.init(path=CONFIG_PATH)
    init_logging()
    ret_value = 0
    # Loop and accept messages from both channels, acting accordingly
    while True:
        next_task = self.task_queue.get()
        if next_task is None:
            # Poison pill means shutdown
            self.task_queue.task_done()
            logging.debug("%s Got poison pill" % (os.getpid()))
            break
        try:
            with open(next_task) as nextfile:
                file_buffer = nextfile.read()
        except IOError:
            logging.debug("Error opening: %s" % (next_task))
            self.task_queue.task_done()
            # Nothing was scanned; push an empty (compressed) result so the
            # result accounting stays in sync with the task count
            self.result_queue.put(zlib.compress(""))
            continue
        resultJSON = ""
        try:
            # perform the work
            result = ScanResult()
            result.source = SOURCE
            result.startTime = time.time()
            result.level = level_metadata
            myexternalVars = ExternalVars(filename=next_task,
                                          source=SOURCE,
                                          ephID=EPHID,
                                          extMetaData=EXT_METADATA)
            Dispatch(file_buffer, result, 0,
                     externalVars=myexternalVars, extScanModules=SCAN_MODULES)
            resultJSON = getJSON(result)
            if SAVE_PATH:
                rootObject = getRootObject(result)
                UID_SAVE_PATH = "%s/%s" % (SAVE_PATH, get_scanObjectUID(rootObject))
                if not os.path.exists(UID_SAVE_PATH):
                    try:
                        os.makedirs(UID_SAVE_PATH)
                    except (OSError, IOError) as e:
                        error("\nERROR: unable to write to %s...\n" % (UID_SAVE_PATH))
                        raise
                for uid, scanObject in result.files.iteritems():
                    with open("%s/%s" % (UID_SAVE_PATH, uid), "wb") as f:
                        f.write(scanObject.buffer)
                    if scanObject.filename and scanObject.depth != 0:
                        linkPath = "%s/%s" % (UID_SAVE_PATH, scanObject.filename.replace("/", "_"))
                        if not os.path.lexists(linkPath):
                            os.symlink("%s" % (uid), linkPath)
                    elif scanObject.filename:
                        filenameParts = scanObject.filename.split("/")
                        os.symlink("%s" % (uid), "%s/%s" % (UID_SAVE_PATH, filenameParts[-1]))
                with open("%s/%s" % (UID_SAVE_PATH, "result.json"), "wb") as f:
                    f.write(resultJSON)
            if LOG_RESULT:
                log_result(result)
            if LOG_JSON:
                LOCAL_PATH = LOG_JSON
                with open(LOCAL_PATH, "ab") as f:
                    f.write(resultJSON + "\n")
        except:
            logging.exception("Scan worker died, shutting down")
            ret_value = 1
            break
        finally:
            self.task_queue.task_done()
            self.result_queue.put(zlib.compress(resultJSON))
    close_modules()
    return ret_value
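# ---------------------------------------------------------------------------
# Illustrative sketch (not part of the original source): the run() above
# expects file paths on task_queue (a joinable queue, since it calls
# task_done()), one None "poison pill" per worker to trigger shutdown, and it
# publishes each result as zlib-compressed JSON on result_queue. A minimal
# driver under those assumptions; worker_cls stands in for whatever class
# defines this run() method and its constructor signature is hypothetical.
def _example_drive_workers(worker_cls, file_paths, num_workers=4):
    import json
    import multiprocessing
    import zlib

    task_queue = multiprocessing.JoinableQueue()
    result_queue = multiprocessing.Queue()
    workers = [worker_cls(task_queue, result_queue) for _ in range(num_workers)]
    for worker in workers:
        worker.start()

    for path in file_paths:
        task_queue.put(path)
    for _ in workers:
        task_queue.put(None)            # one poison pill per worker

    task_queue.join()                   # blocks until every task_done() call

    results = []
    for _ in file_paths:
        data = zlib.decompress(result_queue.get())
        if data:                        # unreadable files yield an empty result
            results.append(json.loads(data))
    return results
# ---------------------------------------------------------------------------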
def run(self):
    '''Main process logic'''
    logging.debug("Worker (%s): starting up", self.identity)
    from laikaboss import config
    from laikaboss.dispatch import close_modules
    from laikaboss.util import init_logging
    logging.debug("using config %s", self.config_location)
    config.init(path=self.config_location)
    init_logging()
    log_debug("Worker %s started at %s" % (self.identity, time.time()))

    self.keep_running = True
    perform_grace_check = False

    # Add intercept for graceful shutdown
    signal.signal(signal.SIGTERM, functools.partial(shutdown_handler, self))
    signal.signal(signal.SIGINT, functools.partial(shutdown_handler, self))

    # Connect to broker
    logging.debug("Worker (%s): connecting broker", self.identity)
    context = zmq.Context(1)
    self.broker = context.socket(zmq.DEALER)
    self.broker.setsockopt(zmq.IDENTITY, self.identity)
    if self.use_ipv6:
        if hasattr(zmq, 'IPV6'):
            self.broker.setsockopt(zmq.IPV6, 1)
        elif hasattr(zmq, 'IPV4ONLY'):
            self.broker.setsockopt(zmq.IPV4ONLY, 0)
        else:
            logging.error("This version of ZMQ does not support IPv6")
    self.broker.connect(self.broker_address)
    self.broker_poller.register(self.broker, zmq.POLLIN)

    # Ask for work
    # request should be in one of the following formats
    #   ['', status]
    # where:
    #   status -- One of our defined status constants, determines
    #             how we handle this request
    self.broker.send_multipart(['', LRU_READY])

    # Indicators for worker expiration
    counter = 0
    start_time = time.time() + randint(1, 60)

    while self.keep_running:
        try:
            result = self.perform_scan(self.poll_timeout)
            if result:
                counter += 1

            should_quit = (
                counter >= self.max_scan_items or
                ((time.time() - start_time) / 60) >= self.ttl or
                not self.keep_running)

            # Determine next status
            status = LRU_QUIT
            if result:
                if should_quit:
                    status = LRU_RESULT_QUIT
                else:
                    status = LRU_RESULT_READY
            else:
                if should_quit:
                    status = LRU_QUIT
                    perform_grace_check = True
                else:
                    status = LRU_READY

            # Build reply
            if result:
                reply = ['', status, ''] + result
            else:
                reply = ['', status]

            # reply should be in one of the following formats
            #   ['', status]
            #   ['', status, '', client_id, '', reply]
            # where:
            #   status -- One of our defined status constants,
            #             determines how we handle this request
            #   client_id -- ZMQ identifier of the client socket
            #   reply -- The content of the reply
            #logging.debug("Worker: sending request %s", str(reply))
            tracker = self.broker.send_multipart(reply, copy=False, track=True)
            while not tracker.done and result:
                time.sleep(0.1)

            if should_quit:
                self.keep_running = False
        except zmq.ZMQError as zmqerror:
            if "Interrupted system call" not in str(zmqerror):
                logging.exception("Worker (%s): Received ZMQError", self.identity)
            else:
                logging.debug("Worker (%s): ZMQ interrupted by shutdown signal", self.identity)
        except QuitScanException:
            logging.debug("Worker (%s): Caught scan termination exception", self.identity)
            break

    # Begin graceful shutdown
    logging.debug("Worker (%s): beginning graceful shutdown sequence", self.identity)
    if perform_grace_check:
        logging.debug("Worker (%s): performing grace check", self.identity)
        try:
            result = self.perform_scan(self.poll_timeout)
            if result:
                reply = ['', LRU_RESULT_QUIT, ''] + result
                # reply should be in the following format
                #   ['', status, '', client_id, '', reply]
                # where:
                #   status -- One of our defined status constants,
                #             determines how we handle this request
                #   client_id -- ZMQ identifier of the client socket
                #   reply -- The content of the reply
                tracker = self.broker.send_multipart(reply, copy=False, track=True)
                while not tracker.done:
                    time.sleep(0.1)
        except zmq.ZMQError as zmqerror:
            if "Interrupted system call" not in str(zmqerror):
                logging.exception("Worker (%s): Received ZMQError", self.identity)
            else:
                logging.debug("Worker (%s): ZMQ interrupted by shutdown signal", self.identity)
    try:
        with timeout(self.shutdown_grace_timeout, exception=QuitScanException):
            close_modules()
    except QuitScanException:
        logging.debug("Worker (%s): Caught scan termination exception during destruction", self.identity)
    log_debug("Worker %s dying after %i objects and %i seconds" % (
        self.identity, counter, time.time() - start_time))
    logging.debug("Worker (%s): finished", self.identity)
def run(self):
    global CONFIG_PATH
    config.init(path=CONFIG_PATH)
    init_logging()
    ret_value = 0
    # Loop and accept messages from both channels, acting accordingly
    while True:
        next_task = self.task_queue.get()
        if next_task is None:
            # Poison pill means shutdown
            self.task_queue.task_done()
            logging.debug("%s Got poison pill" % (os.getpid()))
            break
        try:
            with open(next_task) as nextfile:
                file_buffer = nextfile.read()
        except IOError:
            logging.debug("Error opening: %s" % (next_task))
            self.task_queue.task_done()
            # Nothing was scanned; push an empty (compressed) result so the
            # result accounting stays in sync with the task count
            self.result_queue.put(zlib.compress(""))
            continue
        resultJSON = ""
        try:
            # perform the work
            result = ScanResult()
            result.source = SOURCE
            result.startTime = time.time()
            result.level = level_metadata
            myexternalVars = ExternalVars(filename=next_task,
                                          source=SOURCE,
                                          ephID=EPHID,
                                          extMetaData=EXT_METADATA)
            Dispatch(file_buffer, result, 0,
                     externalVars=myexternalVars, extScanModules=SCAN_MODULES)
            resultJSON = getJSON(result)
            if SAVE_PATH:
                rootObject = getRootObject(result)
                UID_SAVE_PATH = "%s/%s" % (SAVE_PATH, get_scanObjectUID(rootObject))
                if not os.path.exists(UID_SAVE_PATH):
                    try:
                        os.makedirs(UID_SAVE_PATH)
                    except (OSError, IOError) as e:
                        error("\nERROR: unable to write to %s...\n" % (UID_SAVE_PATH))
                        raise
                for uid, scanObject in result.files.iteritems():
                    with open("%s/%s" % (UID_SAVE_PATH, uid), "wb") as f:
                        f.write(scanObject.buffer)
                    if scanObject.filename and scanObject.depth != 0:
                        linkPath = "%s/%s" % (UID_SAVE_PATH, scanObject.filename.replace("/", "_"))
                        if not os.path.lexists(linkPath):
                            os.symlink("%s" % (uid), linkPath)
                    elif scanObject.filename:
                        filenameParts = scanObject.filename.split("/")
                        os.symlink("%s" % (uid), "%s/%s" % (UID_SAVE_PATH, filenameParts[-1]))
                with open("%s/%s" % (UID_SAVE_PATH, "result.json"), "wb") as f:
                    f.write(resultJSON)
            if LOG_RESULT:
                log_result(result)
        except:
            logging.exception("Scan worker died, shutting down")
            ret_value = 1
            break
        finally:
            self.task_queue.task_done()
            self.result_queue.put(zlib.compress(resultJSON))
    close_modules()
    return ret_value
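# ---------------------------------------------------------------------------
# Illustrative sketch (not part of the original source): when SAVE_PATH is
# set, the run() above writes each scanned object's raw buffer to
# <SAVE_PATH>/<root uid>/<uid>, adds filename symlinks beside them, and stores
# the full scan output as result.json. A helper that loads such a directory
# back; the layout is inferred from the code above.
def _example_load_saved_scan(uid_save_path):
    import json
    import os

    with open(os.path.join(uid_save_path, "result.json"), "rb") as f:
        result = json.loads(f.read())

    buffers = {}
    for name in os.listdir(uid_save_path):
        path = os.path.join(uid_save_path, name)
        # skip the JSON summary and the filename symlinks; keep raw buffers
        if name == "result.json" or os.path.islink(path):
            continue
        with open(path, "rb") as f:
            buffers[name] = f.read()
    return result, buffers
# ---------------------------------------------------------------------------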