def cleanup(nodes, cleantmp=False):
    hadError = False
    util.output("cleaning up nodes ...")

    result = isRunning(nodes)
    running = [node for (node, on) in result if on]
    notrunning = [node for (node, on) in result if not on]

    results1 = execute.rmdirs([(n, n.cwd()) for n in notrunning])
    results2 = execute.mkdirs([(n, n.cwd()) for n in notrunning])
    if nodeFailed(results1) or nodeFailed(results2):
        hadError = True

    for node in notrunning:
        node.clearCrashed()

    for node in running:
        util.output(" %s is still running, not cleaning work directory" % node.name)

    if cleantmp:
        results3 = execute.rmdirs([(n, config.Config.tmpdir) for n in running + notrunning])
        results4 = execute.mkdirs([(n, config.Config.tmpdir) for n in running + notrunning])
        if nodeFailed(results3) or nodeFailed(results4):
            hadError = True

    return not hadError
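# For reference, a minimal sketch of the nodeFailed() helper that cleanup()
# relies on. This is an assumed contract, not necessarily the actual
# implementation: it only needs to report whether any entry in a list of
# (node, success) results, as returned by execute.rmdirs()/execute.mkdirs(),
# signaled failure.
def nodeFailed(results):
    for (node, success) in results:
        if not success:
            return True
    return False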
def _startNodes(nodes):
    results = []
    filtered = []

    # Ignore nodes which are still running.
    for (node, isrunning) in isRunning(nodes):
        if not isrunning:
            filtered += [node]
            util.output("starting %s ..." % node.name)
        else:
            util.output("%s still running" % node.name)

    nodes = filtered

    # Generate crash report for any crashed nodes.
    crashed = [node for node in nodes if node.hasCrashed()]
    _makeCrashReports(crashed)

    # Make working directories.
    dirs = [(node, node.cwd()) for node in nodes]
    nodes = []
    for (node, success) in execute.mkdirs(dirs):
        if success:
            nodes += [node]
        else:
            util.output("cannot create working directory for %s" % node.name)
            results += [(node, False)]

    # Start Bro processes.
    cmds = []
    envs = []
    for node in nodes:
        cmds += [(node, "start", [node.cwd()] + _makeBroParams(node, True))]
        envs += [_makeEnvParam(node)]

    nodes = []
    for (node, success, output) in execute.runHelperParallel(cmds, envs=envs):
        if success:
            nodes += [node]
            node.setPID(int(output[0]))
        else:
            util.output("cannot start %s" % node.name)
            results += [(node, False)]

    # Check whether processes did indeed start up.
    hanging = []
    running = []

    for (node, success) in waitForBros(nodes, "RUNNING", 3, True):
        if success:
            running += [node]
        else:
            hanging += [node]

    # It can happen that Bro hangs in DNS lookups at startup, which can take
    # a while. At this point we already know that the process has been
    # started (waitForBros ensures that). If by now there is no TERMINATED
    # status, we assume that it is doing fine and will move on to RUNNING
    # once DNS is done.
    for (node, success) in waitForBros(hanging, "TERMINATED", 0, False):
        if success:
            util.output("%s terminated immediately after starting; check output with \"diag\"" % node.name)
            node.clearPID()
            results += [(node, False)]
        else:
            util.output("(%s still initializing)" % node.name)
            running += [node]

    for node in running:
        cron.logAction(node, "started")
        results += [(node, True)]

    return results
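# A minimal, illustrative sketch of the waitForBros() contract assumed by
# _startNodes(): wait up to 'timeout' seconds for each node to report the
# wanted status, yielding (node, success) pairs. Both probes used below,
# _getNodeStatus() and _processAlive(), are hypothetical stand-ins; the real
# helper batches its status queries across all nodes rather than polling
# them one at a time.
import time

def waitForBros(nodes, status, timeout, ensurerunning):
    results = []
    for node in nodes:
        success = False
        deadline = time.time() + timeout
        while True:
            if _getNodeStatus(node) == status:  # hypothetical status probe
                success = True
                break
            # With ensurerunning, a node whose process has already died
            # counts as failed right away instead of waiting out the timeout.
            if ensurerunning and not _processAlive(node):  # hypothetical
                break
            if time.time() >= deadline:
                break
            time.sleep(1)
        results += [(node, success)]
    return results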
        if n == manager:
            continue

        if not execute.isAlive(n.addr):
            continue

        nodes += [n]

    if config.Config.havenfs != "1":
        # Non-NFS, need to explicitly synchronize. Create the non-mirrored
        # directories one path at a time so that a failure can be reported
        # with the directory it belongs to.
        for dir in [config.Config.subst(dir) for (dir, mirror) in Syncs if not mirror]:
            for (node, success) in execute.mkdirs([(n, dir) for n in nodes]):
                if not success:
                    util.warn("cannot create directory %s on %s" % (dir, node.name))

        paths = [config.Config.subst(dir) for (dir, mirror) in Syncs if mirror]
        execute.sync(nodes, paths)
        util.output("done.")

        # Note: the old code created $brobase explicitly, but the loop above
        # should already take care of that.

    else:
        # NFS. We only need to take care of the spool/log directories.
        paths = [config.Config.spooldir]
        paths += [config.Config.tmpdir]
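# Illustrative only: the Syncs table referenced above is assumed to be a list
# of (directory, mirror) pairs, where 'mirror' selects whether a path's
# contents are rsync'ed to the nodes via execute.sync() (True) or the
# directory is merely created on them (False). The entries below are
# hypothetical placeholders, kept under a separate name so as not to shadow
# the real table.
_exampleSyncs = [
    ("${brobase}", False),           # create only
    ("${brobase}/share/bro", True),  # mirror to all nodes
    ("${cfgdir}", True),
]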