def periodic(command_string, min_seconds, max_seconds, foreground=False, ignore_errors=False, wait=True, max_count=None, max_runtime_seconds=None):
    """Run a dsz command repeatedly, sleeping a random interval between runs.

    Prompts for confirmation first, then loops: run the command, and on
    failure (unless ignore_errors) offer to edit the command string or abort.
    Stops when max_count runs have completed or max_runtime_seconds has
    elapsed (whichever limit is set and reached first).

    Returns None on normal exit, -1 if the user declines to fix a failing
    command.
    """
    # Normalize a reversed interval rather than rejecting it.
    if max_seconds < min_seconds:
        min_seconds, max_seconds = max_seconds, min_seconds
    if not dsz.ui.Prompt(CONFIRM_PERIOD_MSG % (command_string, min_seconds, max_seconds)):
        return
    if not foreground:
        dsz.ui.Background()
    runs_done = 1
    started_at = datetime.now()
    while True:
        # 'wait' selects synchronous vs. background execution of the command.
        prefix = 'foreground ' if wait else 'background '
        succeeded = dsz.cmd.Run(prefix + command_string, dsz.RUN_FLAG_RECORD)
        if not (succeeded or ignore_errors):
            # Give the operator a chance to repair the command and retry
            # immediately (no sleep, no count increment).
            if not dsz.ui.Prompt(CHANGE_COMMAND_ERROR % command_string):
                return -1
            command_string = dsz.ui.GetString(NEW_COMMAND_MSG)
            continue
        if max_count and runs_done >= max_count:
            print('')
            dsz.ui.Echo('Max count reached. Exiting.')
            break
        elapsed = datetime.now() - started_at
        if max_runtime_seconds and _total_seconds(elapsed) >= max_runtime_seconds:
            print('')
            dsz.ui.Echo('Max runtime exceeded. Exiting.')
            break
        nap_seconds = random.randint(min_seconds, max_seconds)
        print('')
        dsz.ui.Echo(SLEEPING_MSG % nap_seconds, dsz.GOOD)
        # dsz.Sleep takes milliseconds.
        dsz.Sleep(nap_seconds * 1000)
        runs_done += 1
def CreateLogFile(self, prefix, suffix, subDir=None, utf8=True):
    """Create a uniquely named log file under the configured log directory.

    The directory comes from the _LOGPATH environment value (optionally
    extended by subDir). Tries up to 1000 generated names, opening each with
    O_EXCL so an existing file is never clobbered; names longer than 260
    chars (Windows MAX_PATH) are regenerated with a short 'x' prefix.

    Returns (file_object, directory_path, log_name) on success, or
    (None, '', '') if no unique name could be opened.

    Bug fix: the original looped `while 1` but only did work when
    numTrysLeft > 0, so exhausting the retries spun forever and the
    (None, '', '') return was unreachable. The loop now terminates.
    """
    import mcl.data.env
    import os
    import os.path
    _LOG_DIR_ENV_NAME = '_LOGPATH'
    storageDir = mcl.data.env.GetValue(_LOG_DIR_ENV_NAME, True)
    if len(storageDir) > 0:
        storageDir = storageDir + '/'
    if subDir is not None:
        storageDir = storageDir + subDir
    try:
        os.makedirs(storageDir)
    except OSError:
        # Directory most likely exists already; creation is best-effort.
        pass
    path = os.path.normpath(storageDir + '/')
    numTrysLeft = 1000
    while numTrysLeft > 0:
        numTrysLeft = numTrysLeft - 1
        logName = _logGenerateName(prefix, suffix)
        fullPath = os.path.normpath(path + '/' + logName)
        if len(fullPath) > 260:
            # Path exceeds Windows MAX_PATH; retry with a minimal prefix.
            logName = _logGenerateName('x', suffix)
            fullPath = os.path.normpath(path + '/' + logName)
        try:
            # O_EXCL guarantees we never reuse an existing file name.
            _f = os.open(
                fullPath,
                os.O_WRONLY | os.O_CREAT | os.O_EXCL | os.O_BINARY)
            f = os.fdopen(_f, 'wb')
        except OSError:
            # Name collision (or transient FS error): back off briefly and
            # try the next generated name.
            import dsz
            dsz.Sleep(10)
            continue
        if utf8:
            # Write a UTF-8 BOM so consumers detect the encoding.
            f.write('\xef\xbb\xbf')
        return (f, path, logName)
    return (None, '', '')
def next(self):
    """Return the next parsed item, advancing through log files as needed.

    Drains the current item queue first; when empty, moves to the next file
    in self.files and parses it. When all known files are consumed, polls
    (while the producer reports it is running) for newly appeared logs,
    sleeping 5s between checks. Raises StopIteration when the operator stops
    the script or when no more data will arrive.
    """
    # Fast path: hand out anything already parsed and queued.
    if not self.currentItems.empty():
        return self.currentItems.get()
    # Advance the file cursor; None means iteration has not started yet.
    self.currentIndex = 0 if self.currentIndex is None else self.currentIndex + 1
    if self.currentIndex >= len(self.files):
        # Ran off the end of the known files: wait for new ones to appear
        # while the producer is still running.
        while self.IsRunning():
            if dsz.script.CheckStop():
                raise StopIteration
            if self.__setLogs():
                # New files were found; restart to pick them up.
                return self.next()
            dsz.Sleep(5000)
        # Producer finished: one final refresh before giving up.
        self.__setLogs()
        if self.currentIndex >= len(self.files):
            self.currentIndex = None
            raise StopIteration
    self.__parseFile(self.files[self.currentIndex])
    return self.next()
# Monitor a running packetredirect command and collect any new send errors.
import dsz
import ops.data
import ops.parseargs

dsz.control.echo.Off()
dsz.ui.Background()
parser = ops.parseargs.ArgumentParser()
parser.add_argument('command_id', type=int, help='Command ID to monitor.')
args = parser.parse_args()
# Wrap the existing dsz command so we can poll its metadata.
pr = ops.data.getDszObject(args.command_id)
lasterror = None  # timestamp of the most recent error we have reported
while True:
    dsz.script.CheckStop()
    dsz.Sleep(5000)  # milliseconds; poll every 5 seconds
    pr.update()
    if (not pr.commandmetadata.isrunning):
        # Monitored command has finished; nothing more to watch.
        break
    errors = pr.commandmetadata.friendlyerrors
    if (not len(errors)):
        continue
    # Only report when a strictly newer error than the last one seen arrives.
    if ((lasterror is None) or (lasterror < errors[(-1)].timestamp)):
        lasterror = errors[(-1)].timestamp
        msg = ('packetredirect failed to send!\n Command %d: %s\n' % (pr.commandmetadata.id, pr.commandmetadata.fullcommand))
        # NOTE(review): this iterates errors[-1] (the newest entry), which
        # implies a single entry is itself iterable — presumably a group of
        # error records sharing one timestamp; verify against ops.data.
        for i in errors[(-1)]:
            if ((i.type == 'OsError') and (i.text == 'The system cannot find the file specified.')):
                msg += (" - %s Do you need '-driver', '-raw' or to toggle FLAV?" % i)
            else:
                msg += (' - %s\n' % i)
        # NOTE(review): msg is assembled but never echoed/alerted in this
        # view — the display call may have been lost in decompilation.
def scan(scansweepHelper):
    """Main scansweep work loop.

    Repeatedly pulls scan jobs from the session queue, executes them,
    records results, applies escalation/alert rules, optionally runs
    passive connection monitors, and emits periodic "sanity" progress
    lines to the console and the scansweep log file. Exits on: maxtime
    exceeded, queue empty (when not monitoring), a session kill flag,
    or an interrupted sleep.
    """
    lastresults = 0       # result-count snapshot used to detect new results
    alreadyoutput = []    # job-counts already announced (avoid duplicate sanity lines)
    num_remaining = scanbase.num_jobs(scansweepHelper.session)
    # Estimated time-to-complete range is jobs * per-job min/max delay.
    sanity_string = ('[%s] Sanity output: %s jobs remaining, %s-%s remaining' % (dsz.Timestamp(), num_remaining, ops.timehelper.get_age_from_seconds((num_remaining * scansweepHelper.min_seconds)), ops.timehelper.get_age_from_seconds((num_remaining * scansweepHelper.max_seconds))))
    dsz.ui.Echo(sanity_string, dsz.GOOD)
    scansweepHelper.showstats()
    # Ensure the log directory exists (single level only: mkdir, not makedirs).
    if (not os.path.exists(os.path.dirname(scansweepHelper.scansweep_logfile))):
        os.mkdir(os.path.dirname(scansweepHelper.scansweep_logfile))
    with open(scansweepHelper.scansweep_logfile, 'a') as f:
        f.write(('%s\n' % sanity_string))
    delta = time.time()         # last sanity-output time
    scantime = time.time()      # last loop-iteration start (for nowait pacing)
    originaltime = time.time()  # loop start (for maxtime enforcement)
    if (scansweepHelper.monitor is not None):
        scansweepHelper.activatemonitors()
    while True:
        # Hard wall-clock limit for the whole sweep.
        if ((time.time() - originaltime) > scansweepHelper.maxtime):
            dsz.ui.Echo(('Maxtime of %s has been exceeded. Exiting.' % ops.timehelper.get_age_from_seconds(scansweepHelper.maxtime)), dsz.ERROR)
            break
        scan_job = scanbase.get_job(scansweepHelper.session)
        if (scan_job == False):
            # No queued jobs: stop unless we are also monitoring connections.
            if (scansweepHelper.monitor is None):
                break
        else:
            try:
                # scan_job is (job_spec, target); job_spec is '|'-delimited
                # with the job type first.
                target = scan_job[1]
                job_info = scan_job[0].split('|')
                job_type = job_info[0]
                if (not util.ip.validate(target)):
                    # Target is a hostname; resolve it or skip the job.
                    target = scansweepHelper.resolvehostname(target)
                    if (target == None):
                        continue
                target_scanner = scanengine2.get_scanengine(scan_job, scansweepHelper.timeout)
                target_scanner.execute_scan(False)
                # Persist one result row per response (some scans yield many).
                if target_scanner.multiple_responses:
                    multi_response = target_scanner.return_data()
                    for response in multi_response:
                        scanbase.write_result(scansweepHelper.session, response.scan_type, response.target, response.return_data(), response.success, scan_job[0])
                else:
                    scanbase.write_result(scansweepHelper.session, target_scanner.scan_type, target_scanner.target, target_scanner.return_data(), target_scanner.success, scan_job[0])
                if target_scanner.success:
                    succ_out_string = ('[%s] %s (%s jobs remaining)' % (target_scanner.timestamp, target_scanner.return_success_message(), scanbase.num_jobs(scansweepHelper.session)))
                    dsz.ui.Echo(succ_out_string)
                    with open(scansweepHelper.scansweep_logfile, 'a') as f:
                        f.write(('%s\n' % succ_out_string))
                # Apply escalation rules: (match_expr, action) pairs where the
                # action is either 'alert' or a follow-up job type to enqueue.
                rulelist = scanbase.get_escalate_rules(scansweepHelper.session)
                for rule in rulelist:
                    if target_scanner.check_escalation(rule[0]):
                        if (rule[1] == 'alert'):
                            # Indented (tabbed) message when the scan itself succeeded.
                            if (target_scanner.success == True):
                                esc_output_string = ('[%s]\t\tAlerting on %s by rule: (%s->%s)' % (dsz.Timestamp(), target, rule[0], rule[1]))
                            else:
                                esc_output_string = ('[%s] Alerting on %s by rule: (%s->%s)' % (dsz.Timestamp(), target, rule[0], rule[1]))
                            scansweepHelper.alert(esc_output_string)
                            dsz.ui.Echo(esc_output_string, dsz.WARNING)
                        else:
                            # Escalate: queue the follow-up job type for this target.
                            add_succ = scansweepHelper.addtoqueue(rule[1], target, scansweepHelper.scansweep_env)
                            if ((target_scanner.success == True) and add_succ):
                                esc_output_string = ('[%s]\t\tEscalating %s by rule: (%s->%s) (%s jobs remaining)' % (dsz.Timestamp(), target, rule[0], rule[1], scanbase.num_jobs(scansweepHelper.session)))
                            elif add_succ:
                                esc_output_string = ('[%s] Escalating %s by rule: (%s->%s) (%s jobs remaining)' % (dsz.Timestamp(), target, rule[0], rule[1], scanbase.num_jobs(scansweepHelper.session)))
                            # NOTE(review): if add_succ is False, esc_output_string
                            # may be unbound here (NameError caught by the outer
                            # except) — verify intended behavior.
                            dsz.ui.Echo(esc_output_string)
                            with open(scansweepHelper.scansweep_logfile, 'a') as f:
                                f.write(('%s\n' % esc_output_string))
            except Exception as e:
                # Per-job failures are non-fatal: let the operator decide.
                if dsz.ui.Prompt(('The current job failed for some reason. Would you like to quit? %s' % e), False):
                    break
                else:
                    continue
        if scansweepHelper.monitor:
            # Passive monitoring: look for new connections and run the same
            # escalation rules against them.
            for monitor_handler in scansweepHelper.monitorengines:
                found_connections = monitor_handler.execute_monitor()
                for connection in found_connections:
                    rulelist = scanbase.get_escalate_rules(scansweepHelper.session)
                    for rule in rulelist:
                        if monitor_handler.check_escalation(rule[0], connection):
                            found = False
                            add_succ = True
                            # Unless overridden, only escalate targets inside
                            # one of our local networks.
                            if (not scansweepHelper.internaloverride):
                                for network in scansweepHelper.local_networks:
                                    if util.ip.validate_ipv6(connection.target):
                                        # Compare expanded IPv6 /64-ish prefix
                                        # (first 19 chars of expanded form).
                                        if (util.ip.expand_ipv6(connection.target)[:19] == network[1]):
                                            found = True
                                            break
                                    elif ((not (network[0] == '')) and (scansweepHelper.getnetwork(connection.target, util.ip.get_cidr_from_subnet(network[0])) == network[1])):
                                        found = True
                                        break
                            if ((not scansweepHelper.internaloverride) and (not found)):
                                esc_output_string = ('[%s] Escalation failed (outside subnet) %s by rule: (%s->%s) (%s jobs remaining)' % (dsz.Timestamp(), connection.target, rule[0], rule[1], scanbase.num_jobs(scansweepHelper.session)))
                                dsz.ui.Echo(esc_output_string, dsz.WARNING)
                            elif (rule[1] == 'alert'):
                                esc_output_string = ('[%s] Alerting on %s by rule: (%s->%s)' % (dsz.Timestamp(), connection.target, rule[0], rule[1]))
                                scansweepHelper.alert(esc_output_string)
                                dsz.ui.Echo(esc_output_string, dsz.WARNING)
                            else:
                                add_succ = scansweepHelper.addtoqueue(rule[1], connection.target, scansweepHelper.scansweep_env)
                                if add_succ:
                                    esc_output_string = ('[%s] Escalating %s by rule: (%s->%s) (%s jobs remaining)' % (dsz.Timestamp(), connection.target, rule[0], rule[1], scanbase.num_jobs(scansweepHelper.session)))
                                    dsz.ui.Echo(esc_output_string)
                            if add_succ:
                                with open(scansweepHelper.scansweep_logfile, 'a') as f:
                                    f.write(('%s\n' % esc_output_string))
        newdelta = time.time()
        num_remaining = scanbase.num_jobs(scansweepHelper.session)
        # Periodic sanity output: every 10th job count (once each) or at
        # least every 5 minutes.
        if ((((num_remaining % 10) == 0) and (not (num_remaining in alreadyoutput))) or ((newdelta - delta) > (5 * 60))):
            maxremaining = int((scansweepHelper.maxtime - (time.time() - originaltime)))
            sanity_string = ('[%s] Sanity output: %s jobs remaining, %s-%s remaining (max %s), %0.1fs since last sanity' % (dsz.Timestamp(), num_remaining, ops.timehelper.get_age_from_seconds((num_remaining * scansweepHelper.min_seconds)), ops.timehelper.get_age_from_seconds((num_remaining * scansweepHelper.max_seconds)), ops.timehelper.get_age_from_seconds(maxremaining), (newdelta - delta)))
            dsz.ui.Echo(sanity_string, dsz.GOOD)
            with open(scansweepHelper.scansweep_logfile, 'a') as f:
                f.write(('%s\n' % sanity_string))
            scansweepHelper.showstats()
            # NOTE(review): counts jobs for scansweep_env here, while every
            # other call uses session — possibly a decompilation artifact.
            alreadyoutput.append(scanbase.num_jobs(scansweepHelper.scansweep_env))
            delta = newdelta
        # Regenerate result listings only when the total result count changed.
        resultstotal = 0
        type_list = scanbase.get_jobtypes(scansweepHelper.session)
        for type in type_list:
            resultstotal = (resultstotal + scansweepHelper.findlistsize(type))
        if (not (lastresults == resultstotal)):
            scansweepHelper.generateresults(quiet=True)
            lastresults = resultstotal
        # Honor an external kill request for this session.
        if scanbase.check_kill(scansweepHelper.session):
            dsz.ui.Echo(('This session (%s) is marked for death. Exiting.' % scansweepHelper.session), dsz.ERROR)
            break
        # Sleep between jobs while work (or monitoring) remains.
        if ((not (scanbase.num_jobs(scansweepHelper.session) == 0)) or scansweepHelper.monitor):
            sleep_in_secs = random.randint(scansweepHelper.min_seconds, scansweepHelper.max_seconds)
            if (not scansweepHelper.nowait):
                if scansweepHelper.verbose:
                    dsz.ui.Echo(('[%s] Sleeping for %s seconds...' % (dsz.Timestamp(), sleep_in_secs)))
                try:
                    dsz.Sleep((sleep_in_secs * 1000))
                except exceptions.RuntimeError as e:
                    # Sleep was interrupted (e.g. script stop): bail out.
                    dsz.ui.Echo(('%s' % e), dsz.ERROR)
                    break
            elif ((time.time() - scantime) < sleep_in_secs):
                # nowait mode: only sleep if this iteration finished early.
                nowaitsleep = int((sleep_in_secs - floor((time.time() - scantime))))
                if scansweepHelper.verbose:
                    dsz.ui.Echo(('[%s] Sleeping for %s seconds (%s seconds remain)...' % (dsz.Timestamp(), sleep_in_secs, nowaitsleep)))
                try:
                    # NOTE(review): sleeps the full interval rather than the
                    # computed remainder (nowaitsleep) — verify intent.
                    dsz.Sleep((sleep_in_secs * 1000))
                except exceptions.RuntimeError as e:
                    dsz.ui.Echo(('%s' % e), dsz.ERROR)
                    break
            elif scansweepHelper.verbose:
                dsz.ui.Echo(('[%s] Would sleep for %s seconds but we are overdue...' % (dsz.Timestamp(), sleep_in_secs)))
            scantime = time.time()
        # Re-check the kill flag after sleeping so a kill during the sleep
        # takes effect promptly.
        if scanbase.check_kill(scansweepHelper.session):
            dsz.ui.Echo(('This session (%s) is marked for death. Exiting.' % scansweepHelper.session), dsz.ERROR)
            break
# NOTE(review): this chunk starts with a dangling `else:` — its matching
# `if`/`try` is outside this view; presumably an argument-validation branch.
else:
    sys.exit((-1))
# Execute the wrapped command once, then poll it, re-saving the monitored
# object into the volatile DB (and optionally the target DB) each interval.
mondata = cmd.execute()
voldb = ops.db.get_voldb()
targetID = ops.project.getTargetID()
if options.savetotarget:
    tdb = ops.db.get_tdb()
if (mondata is not None):
    # First save establishes the cache ids used for subsequent updates.
    vol_cache_id = voldb.save_ops_object(mondata, tag=options.tag, targetID=targetID)
    if options.savetotarget:
        tdb_cache_id = tdb.save_ops_object(mondata, tag=options.tag)
    while mondata.commandmetadata.isrunning:
        try:
            dsz.Sleep((options.interval * 1000))  # interval is in seconds
            mondata.update()
            voldb.save_ops_object(mondata, cache_id=vol_cache_id, tag=options.tag, targetID=targetID)
            if options.savetotarget:
                tdb.save_ops_object(mondata, cache_id=tdb_cache_id, tag=options.tag)
        except KeyboardInterrupt:
            ops.error('User killed channel!')
            sys.exit((-1))
    # Final save after the command stops.
    mondata.update()
    # NOTE(review): the final voldb save omits targetID (unlike earlier
    # saves), and the final tdb save is NOT guarded by options.savetotarget
    # — if savetotarget is False, tdb/tdb_cache_id are unbound and this
    # raises NameError. Looks like a bug; confirm before fixing.
    voldb.save_ops_object(mondata, cache_id=vol_cache_id, tag=options.tag)
    tdb.save_ops_object(mondata, cache_id=tdb_cache_id, tag=options.tag)
def main(args):
    """Query (and optionally monitor) a Windows event log via eventlogtime.

    Builds the argument parser (target, limiters, filters, output, monitor
    groups), then either runs one query or — with --monitor — loops forever,
    querying only the records that appeared since the previous pass.

    NOTE(review): `args` is never used; parse_args() reads sys.argv. Confirm
    whether args should be forwarded to parse_args.
    """
    parser = ArgumentParser()
    group_target = parser.add_argument_group('Target', 'Options that describe the event log to query')
    group_target.add_argument('--log', action='store', dest='log', default='security', help='The event log to search. Default = Security')
    group_target.add_argument('--target', action='store', dest='target', help='Remote machine to query')
    group_limiters = parser.add_argument_group('Limiters', 'Options that limit the range over which we are searching')
    group_limiters.add_argument('--num', action='store', dest='num', default=1000, type=int, help="The number of entries to parse. A value of zero will result in all entries being parsed. Parsing will cease once the first 1000 records are found unless the 'max' keyword is used.")
    # NOTE(review): default=100 contradicts the help text's "Default=1000";
    # one of the two is wrong — confirm intended default before changing.
    group_limiters.add_argument('--max', action='store', dest='max', default=100, type=int, help='Maximum entries returned from the target. Default=1000. A value of 0 will result in all possible entries returned. It is recommended that a value of 0 not be used due to the fact that a large database could result in an excessive number of entries being parsed and cause a slowdown in the speed and memory usage of the LP.')
    group_limiters.add_argument('--startrecord', action='store', dest='startrecord', help='Record with which to begin filtering. Default = Most recent record.')
    group_filters = parser.add_argument_group('Filters', 'Options that describe what we are looking for')
    group_filters.add_argument('--id', action='store', dest='id', help='The Event ID on which to filter. Default = No filtering.')
    group_filters.add_argument('--logons', action='store_true', dest='logons', default=False, help='Eventlogfilter for common authentication logs')
    group_filters.add_argument('--string', action='store', dest='string_opt', help='String in entry on which to filter. Default = No filtering.')
    group_filters.add_argument('--sid', action='store', dest='sid', help='Username on which to filter. Default = No filtering.')
    group_filters.add_argument('--xpath', action='store', dest='xpath', help='XPath expression for search.')
    group_output = parser.add_argument_group('Output', 'Options that change the output')
    group_output.add_argument('--summary', action='store_true', dest='summary', default=False, help='Display a list of the strings associated with each event record')
    group_monitor = parser.add_argument_group('Monitor', 'Options that deal with monitoring')
    group_monitor.add_argument('--monitor', action='store_true', dest='monitor', default=False, help='Execute the eventlogfilter command at a given interval and display any new results')
    # interval strings like '5m' are converted to seconds by the type callable.
    group_monitor.add_argument('--interval', action='store', dest='interval', default='5m', type=ops.timehelper.get_seconds_from_age, help='Interval at which to monitor')
    options = parser.parse_args()
    last_record = 0     # newest record number seen on the previous pass
    newest_record = 0
    querymax = options.max
    querynum = options.num
    startrecord = options.startrecord
    while True:
        if options.monitor:
            newest_record = getmostrecentrecordnum(eventlog=options.log)
            # After the first pass, only fetch records added since last time.
            if (not (last_record == 0)):
                querynum = (newest_record - last_record)
                startrecord = newest_record
                querymax = querynum
                if (querymax == 0):
                    dsz.ui.Echo(('[%s] No new records' % ops.timestamp()), dsz.WARNING)
                    dsz.Sleep((options.interval * 1000))
                    continue
        dsz.ui.Echo(('=' * 80), dsz.GOOD)
        eventlogtime(log=options.log, max=querymax, num=querynum, id_list=options.id, sid=options.sid, string_opt_list=options.string_opt, startrecord=startrecord, xpath=options.xpath, target=options.target, summary=options.summary, logons=options.logons)
        last_record = newest_record
        if (not options.monitor):
            # One-shot mode: single query, then done.
            return
        dsz.Sleep((options.interval * 1000))  # interval seconds -> milliseconds