def execute_file(name: str, file: str, graph: SubgraphView, sender, msg_id):
    """Run the analyzer defined in `file` against every node of `graph`.

    The analyzer source is exec'd into module globals (it is expected to
    define `analyzer`), then applied to each node that has a node_key and
    is not already in the message cache (processed) or hit cache (matched).
    Work is fanned out on a thread pool; completion or failure is reported
    back through `sender`.

    NOTE(review): exec() of analyzer source is unsafe for untrusted input —
    presumably `file` comes from a trusted analyzer store; confirm upstream.
    """
    alpha_names = os.environ["MG_ALPHAS"].split(",")
    exec(file, globals())
    try:
        pool = ThreadPool(processes=64)
        results = []
        for node in graph.node_iter():
            # Skip nodes we cannot identify.
            if not node.node.node_key:
                print(
                    f'missing key {vars(node.node.node)} type: {type(node.node)}'
                )
                continue
            # Skip nodes already processed for this message.
            if check_msg_cache(file, node.node.node_key, msg_id):
                print('cache hit - already processed')
                continue
            # Skip nodes this analyzer already matched.
            if check_hit_cache(name, node.node.node_key):
                print('cache hit - already matched')
                continue

            def exec_analyzer(analyzer, node, sender):
                """Run one analyzer against one node with a fresh client."""
                try:
                    client_stubs = [
                        DgraphClientStub(f"{a_name}:9080")
                        for a_name in alpha_names
                    ]
                    client = DgraphClient(*client_stubs)
                    analyzer(client, node, sender)
                    return node
                except Exception as e:
                    print(traceback.format_exc())
                    print(f'Execution of {name} failed with {e} {e.args}')
                    sender.send(ExecutionFailed())
                    raise

            # BUG FIX: the original called exec_analyzer(analyzer, node, sender)
            # synchronously here *and* submitted it to the pool, executing each
            # analyzer twice per node. Only the async submission is kept.
            t = pool.apply_async(exec_analyzer, (analyzer, node, sender))
            results.append(t)

        pool.close()

        # .get() blocks until each task finishes and re-raises worker errors,
        # so the message cache is only updated for successfully handled nodes.
        for result in results:
            node = result.get()
            update_msg_cache(file, node.node.node_key, msg_id)

        sender.send(ExecutionComplete())
    except Exception as e:
        print(traceback.format_exc())
        print(f'Execution of {name} failed with {e} {e.args}')
        sender.send(ExecutionFailed())
        raise
def execute_file(
    self,
    name: str,
    file: str,
    graph: SubgraphView,
    sender: Connection,
    msg_id: str,
    chunk_size: int,
) -> None:
    """Execute every analyzer from `file` over `graph`, chunk by chunk.

    The analyzer source is exec'd into module globals, its analyzer
    objects are collected from a fresh graph client, and the subgraph's
    nodes are dispatched `chunk_size` at a time onto a small thread pool.
    Completion or failure is signalled to the parent over `sender`.
    """
    try:
        pool = ThreadPool(processes=4)
        exec(file, globals())

        client = GraphClient()
        analyzers = get_analyzer_objects(client)
        if not analyzers:
            self.logger.warning(f"Got no analyzers for file: {name}")
        self.logger.info(f"Executing analyzers: {[an for an in analyzers.keys()]}")

        def exec_analyzer(
            nodes: List[BaseView], sender: Connection
        ) -> List[BaseView]:
            """Run all analyzers over a single chunk of nodes."""
            try:
                self.exec_analyzers(
                    client, file, msg_id, nodes, analyzers, sender
                )
            except Exception as e:
                self.logger.error(traceback.format_exc())
                self.logger.error(
                    f"Execution of {name} failed with {e} {e.args}"
                )
                sender.send(ExecutionFailed())
                raise
            return nodes

        all_nodes = list(graph.node_iter())
        for nodes in chunker(all_nodes, chunk_size):
            self.logger.info(f"Querying {len(nodes)} nodes")
            pool.apply_async(exec_analyzer, args=(nodes, sender))

        pool.close()
        pool.join()
        sender.send(ExecutionComplete())
    except Exception as e:
        self.logger.error(traceback.format_exc())
        self.logger.error(f"Execution of {name} failed with {e} {e.args}")
        sender.send(ExecutionFailed())
        raise
def execute_file(name: str, file: str, graph: SubgraphView, sender, msg_id):
    """Execute every analyzer from `file` over `graph`, chunk by chunk.

    The analyzer source is exec'd into module globals, analyzer objects
    are collected from a Dgraph client built from MG_ALPHAS, and nodes
    are dispatched in chunks onto a small thread pool. Completion or
    failure is signalled to the parent over `sender`.
    """
    alpha_names = os.environ["MG_ALPHAS"].split(",")
    try:
        pool = ThreadPool(processes=4)
        exec(file, globals())
        client_stubs = [
            DgraphClientStub(f"{a_name}:9080") for a_name in alpha_names
        ]
        client = DgraphClient(*client_stubs)
        analyzers = get_analyzer_objects(client)
        if not analyzers:
            print(f'Got no analyzers for file: {name}')

        print(f'Executing analyzers: {[an for an in analyzers.keys()]}')

        # Use smaller chunks on retry to reduce per-task blast radius.
        chunk_size = 100
        if IS_RETRY == "True":
            chunk_size = 10

        for nodes in chunker([n for n in graph.node_iter()], chunk_size):
            print(f'Querying {len(nodes)} nodes')

            def exec_analyzer(nodes, sender):
                """Run all analyzers over a single chunk of nodes."""
                try:
                    exec_analyzers(client, file, msg_id, nodes, analyzers, sender)
                    return nodes
                except Exception as e:
                    print(traceback.format_exc())
                    print(f'Execution of {name} failed with {e} {e.args}')
                    sender.send(ExecutionFailed())
                    raise

            # BUG FIX: the original called exec_analyzer(nodes, sender)
            # synchronously here *and* submitted it to the pool, running
            # every chunk twice. Only the async submission is kept.
            pool.apply_async(exec_analyzer, args=(nodes, sender))

        pool.close()
        pool.join()
        sender.send(ExecutionComplete())
    except Exception as e:
        print(traceback.format_exc())
        print(f'Execution of {name} failed with {e} {e.args}')
        sender.send(ExecutionFailed())
        raise
def execute_file(name: str, file: str, graph: SubgraphView, sender, msg_id):
    """Execute every analyzer from `file` over `graph`, chunk by chunk.

    The analyzer source is exec'd into module globals, analyzer objects
    are collected from a Dgraph client built from mg_alphas(), and nodes
    are dispatched in chunks onto a small thread pool. Completion or
    failure is signalled to the parent over `sender`.
    """
    try:
        pool = ThreadPool(processes=4)
        exec(file, globals())
        client_stubs = (
            DgraphClientStub(f"{host}:{port}") for host, port in mg_alphas()
        )
        client = DgraphClient(*client_stubs)
        analyzers = get_analyzer_objects(client)
        if not analyzers:
            LOGGER.warning(f"Got no analyzers for file: {name}")

        LOGGER.info(f"Executing analyzers: {[an for an in analyzers.keys()]}")

        # Use smaller chunks on retry to reduce per-task blast radius.
        chunk_size = 100
        if IS_RETRY == "True":
            chunk_size = 10

        for nodes in chunker([n for n in graph.node_iter()], chunk_size):
            LOGGER.info(f"Querying {len(nodes)} nodes")

            def exec_analyzer(nodes, sender):
                """Run all analyzers over a single chunk of nodes."""
                try:
                    exec_analyzers(client, file, msg_id, nodes, analyzers, sender)
                    return nodes
                except Exception as e:
                    LOGGER.error(traceback.format_exc())
                    LOGGER.error(
                        f"Execution of {name} failed with {e} {e.args}")
                    sender.send(ExecutionFailed())
                    raise

            # BUG FIX: the original called exec_analyzer(nodes, sender)
            # synchronously here *and* submitted it to the pool, running
            # every chunk twice. Only the async submission is kept.
            pool.apply_async(exec_analyzer, args=(nodes, sender))

        pool.close()
        pool.join()
        sender.send(ExecutionComplete())
    except Exception as e:
        LOGGER.error(traceback.format_exc())
        LOGGER.error(f"Execution of {name} failed with {e} {e.args}")
        sender.send(ExecutionFailed())
        raise
def _analyzer(client: DgraphClient, graph: SubgraphView, sender: Connection):
    """Scan the subgraph's process nodes for the svchost_unusual_parent signature.

    For each ProcessView node, runs `query` against its node_key; every
    non-empty response is reported to the parent as an ExecutionHit with
    risk_score 50. Always finishes with an ExecutionComplete message.
    """
    print(f'Analyzing {len(graph.nodes)} nodes')
    for view in graph.nodes.values():
        inner = view.node
        # Only process nodes are relevant to this signature.
        if isinstance(inner, ProcessView):
            print('Analyzing Process Node')
            response = query(client, inner.node_key)
            print(response)
            if not response:
                continue
            print(f"Got a response {response.node_key}")
            print(f"Debug view: {response.to_dict(root=True)}")
            hit = ExecutionHit(
                analyzer_name="svchost_unusual_parent",
                node_view=response,
                risk_score=50,
            )
            sender.send(hit)
    sender.send(ExecutionComplete())