def clean(self):
    """
    Clean the search engine
    """
    if not self.es:
        return
    log.debug("Destroy index %s in search engine" % self.index)
    self.es.indices.delete(index=self.index, ignore=[400, 404])
def __allocate_directory(self):
    root_path = config.config['server']['data_path']
    if not os.path.isdir(root_path):
        log.debug("Root directory created: %s" % root_path)
        os.makedirs(root_path)
    directory = root_path + self.uuid + "/"
    os.makedirs(directory)
    log.debug("Allocate temporary directory %s" % directory)
    return directory
def __clean_sniffer(self):
    """
    Empty the sniffers of their captured packets.

    :return:
    """
    log.debug("Clean sniffers")
    for node in self.net.nodes:
        for sniffer in node.server.sniffers:
            sniffer.packets = []
def run(self):
    for r in self.request:
        action, args = r.iteritems().next()
        if action in self.cmd:
            log.debug("Executing network action: %s(%s)" % (action, args))
            self.cmd[action](args)
        else:
            log.error("Network action %s not found" % action)
    self.send_graph(self.user.network)
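# Illustrative sketch (not part of the module): `self.cmd` above is a
# dispatch table mapping action names to handler callables, and each request
# is a single-entry dict. A minimal standalone analogue, with hypothetical
# handlers, looks like this:
def _demo_dispatch():
    def add_node(args):
        print("add_node(%s)" % args)

    def del_node(args):
        print("del_node(%s)" % args)

    cmd = {"add_node": add_node, "del_node": del_node}
    requests = [{"add_node": {"name": "n1"}}, {"del_node": {"name": "n1"}}]
    for r in requests:
        action, args = list(r.items())[0]
        if action in cmd:
            cmd[action](args)
        else:
            print("Network action %s not found" % action)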
def open_ssh_connexion(self):
    self.connection = paramiko.SSHClient()
    self.connection.load_system_host_keys()
    self.connection.set_missing_host_key_policy(paramiko.AutoAddPolicy())
    self.connection.connect(self.host,
                            port=22,
                            username=self.username,
                            password=self.passwd,
                            key_filename=self.key_filename,
                            timeout=10)
    # Note: despite its name, `transport` holds the SFTP client returned by
    # open_sftp(); it is consumed by Transfer and close_ssh_connexion.
    self.transport = self.connection.open_sftp()
    log.debug("SSH connection established with %s" % self.host)
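# Illustrative sketch (not part of the module): the paramiko calls used
# above in standalone form. The host and credentials are placeholders; this
# only runs against a reachable SSH server.
def _demo_ssh_sftp():
    import paramiko
    client = paramiko.SSHClient()
    client.load_system_host_keys()
    client.set_missing_host_key_policy(paramiko.AutoAddPolicy())
    client.connect("example.com", port=22, username="user", timeout=10)
    sftp = client.open_sftp()  # SFTP channel used for file transfers
    sftp.close()
    client.close()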
def packet_population(self, network):
    """
    Transfer the search criteria of every packet to the search engine.
    """
    if not self.es:
        return
    log.debug("Adding packets to the search engine...")
    packets = [p for n in network.nodes for p in n.packet_list()] + \
              [p for l in network.links for p in l.packet_list()]
    for p in packets:
        self.es.index(index=self.index,
                      doc_type='packets',
                      id=p.uuid,
                      body=p.search_criterion())
    log.debug("All packets added to the search engine")
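# Illustrative sketch (not part of the module): each packet is indexed as a
# flat document keyed by its UUID. `search_criterion()` is assumed here to
# return a dict of criterion names to values; the in-memory dict below
# stands in for the Elasticsearch index.
def _demo_packet_population():
    packets = [
        {"uuid": "a1", "criteria": {"src_ip": "10.0.0.1", "proto": "tcp"}},
        {"uuid": "b2", "criteria": {"src_ip": "10.0.0.2", "proto": "udp"}},
    ]
    fake_index = {}
    for p in packets:
        fake_index[p["uuid"]] = p["criteria"]
    print(fake_index)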
def __set_to_network_level(self):
    """
    Take every packet from the server level (in the sniffers) and place it
    at the network level (edges and vertices). Packet correlation is done
    at the network level.

    :return:
    """
    log.info("Set packets to network level (correlation)")
    node_server = [n.server for n in self.net.nodes]
    for node in self.net.nodes:
        for sniffer in node.server.sniffers:
            for packet in sniffer.packets:
                # Node packet: never leaves a single server
                if packet.internal or (packet.src and packet.dst and
                                       packet.src["server"] == packet.dst["server"]):
                    node.set_packets(packet)
                    packet.state = Packet.ST_NEW
                    continue
                # Link packet: leaves this server for another known server
                if packet.src and packet.dst and \
                        packet.src['server'] == node.server and \
                        packet.dst['server'] in node_server:
                    # Find the matching packet captured on the remote side
                    remote_packet = [
                        p for sniffer in packet.dst['server'].sniffers
                        for p in sniffer.packets if packet.equals(p)
                    ]
                    if len(remote_packet) == 1:
                        remote_packet = remote_packet[0]
                        # Update info with the remote capture
                        packet.dst = remote_packet.dst
                        packet.state = Packet.ST_UPDATED
                    elif len(remote_packet) > 1:
                        log.warning("Multiple remote packets found!")
                        # TODO take the shortest time delta
                    # Add to links
                    links_matched = [
                        l for n, l in node.remote_vertex
                        if n.server == packet.dst['server']
                    ]
                    for l in links_matched:
                        l.set_packets(packet)
                        packet.state = Packet.ST_NEW
            log.debug("Packet correlation done for sniffer %s on %s" %
                      (sniffer.__class__.__name__, node.server.name))
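# Illustrative sketch (not part of the module): two capture points see the
# same packet; correlation matches them with an equality key and completes
# the local record with the remote one. Packet objects are mocked as dicts.
def _demo_correlation():
    local = [{"id": "p1", "src": "A", "dst": None}]
    remote = [{"id": "p1", "src": "A", "dst": "B"}]
    for packet in local:
        matches = [p for p in remote if p["id"] == packet["id"]]
        if len(matches) == 1:
            packet["dst"] = matches[0]["dst"]  # complete with remote info
        elif len(matches) > 1:
            print("Multiple remote packets found!")
    print(local)  # [{'id': 'p1', 'src': 'A', 'dst': 'B'}]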
def packet_process(self):
    """
    Process packet computation and deliver it to the web interface:
        - Get packets from sniffers
        - Correlate packets to the network
        - Do correlations between packets
        - Execute search request
    """
    # TODO improvement: don't wait for the end of packet correlation if the search result is None
    if self.clean_packet:
        self.user.network.clean()
    # Collect packets from sniffers
    PacketCorrelationProcessor(self.user.network, self.user.directory,
                               self.filter).collect()
    self.transfer_old = not self.real_time
    # TODO do threading
    # Do correlations between packets
    # p_stat = multiprocessing.Process(target=self.__gen_stat)
    # p_stat.start()
    self.__gen_stat()
    # Execute search request
    # p_search = multiprocessing.Process(target=self.__gen_search_engine)
    # p_search.start()
    self.__gen_search_engine()
    # Wait for the end of all executions
    # p_stat.join()
    # p_search.join()
    # Shrink the packet set based on the information computed before
    shrink_net = self.__shrink_packet_set()
    # Send data to network level
    log.debug("Sending packet data to Web UI")
    self.ws.write_message(
        json.dumps(
            dict(packets=dict(set=self.__gen_packet_list(
                self.ws.client.network.nodes)))))
    self.ws.write_message(
        json.dumps(
            dict(packets=dict(set=self.__gen_packet_list(
                self.ws.client.network.links)))))
    self.ws.write_message(
        json.dumps(
            dict(packets=dict(groups=self.__gen_packet_group(
                self.user.network.stat['packet_group'])))))
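# Illustrative sketch (not part of the module): the messages pushed to the
# Web UI are JSON objects of the shape {"packets": {"set": [...]}} or
# {"packets": {"groups": {...}}}. The payload below is a hypothetical example.
def _demo_ws_payload():
    import json
    msg = json.dumps(dict(packets=dict(set=[{"uuid": "a1", "tags": ["http"]}])))
    print(msg)  # {"packets": {"set": [{"uuid": "a1", "tags": ["http"]}]}}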
def __get_packets(self):
    """
    Collect all packets from the servers with a filter

    :return:
    """
    log.info("Getting packets from remote devices")
    threads = []
    try:
        for node in self.net.nodes:
            for sniffer in node.server.sniffers:
                th = threading.Thread(target=sniffer.get_packets,
                                      args=(self.filter, self.tmp_dir))
                threads.append(th)
                th.start()
        log.debug("Waiting for all downloads...")
        for th in threads:
            th.join()
        log.debug("Packets received")
    except Exception as e:
        log.error("Server execution: " + str(e))
    return
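# Illustrative sketch (not part of the module): the download fan-out above
# starts one thread per sniffer, then joins them all. A minimal standalone
# analogue with dummy workers:
def _demo_parallel_downloads():
    import threading
    import time

    def fetch(name):
        time.sleep(0.1)  # stand-in for a remote download
        print("done %s" % name)

    threads = [threading.Thread(target=fetch, args=(n,)) for n in ("s1", "s2")]
    for th in threads:
        th.start()
    for th in threads:
        th.join()  # wait for every download before going on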
def search(self, filter):
    """
    Ask the search engine for the most pertinent results based on the
    user filter.

    :param filter: request user filter
    :return: list of packet UUIDs resulting from the search request
    """
    serialized_filter = filter.copy()
    # TODO support time ?
    del serialized_filter['time']
    if serialized_filter == {}:
        return None
    # One "match" clause per criterion; bool/must expects a list of clauses
    matches = [{"match": {c: v}} for (c, v) in serialized_filter.iteritems()]
    result = self.es.search(index=self.index,
                            doc_type='packets',
                            body={"query": {
                                "bool": {
                                    "must": matches
                                }
                            }})
    log.debug("Search engine found %s packets" % result['hits']['total'])
    return [h['_id'] for h in result['hits']['hits']]
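# Illustrative sketch (not part of the module): the filter is turned into an
# Elasticsearch bool/must query, one "match" clause per criterion. With the
# hypothetical filter below, the generated body looks like this:
def _demo_search_body():
    serialized_filter = {"src_ip": "10.0.0.1", "proto": "tcp"}
    matches = [{"match": {c: v}} for (c, v) in serialized_filter.items()]
    body = {"query": {"bool": {"must": matches}}}
    print(body)
    # {'query': {'bool': {'must': [{'match': {'src_ip': '10.0.0.1'}},
    #                              {'match': {'proto': 'tcp'}}]}}}
    # (clause order may vary with dict iteration order)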
def get_packets(self, filter, tmp_dir):
    transfer_path = "/tmp/brorig_transfer_{0!s}.pcap".format(
        base64.b32encode(uuid.uuid4().bytes)[:26])
    local_path = os.path.join(tmp_dir, "log/pcap/")
    # Shrink the pcap trace based on the filter
    log.debug("Shrink the pcap file based on filters")
    t_start = filter['time']['start'].strftime("%Y-%m-%d %X")
    t_stop = filter['time']['stop'].strftime("%Y-%m-%d %X")
    shrink_cmd = 'sudo editcap -v -A "{start}" -B "{stop}" {f_remote} {t_path} > /dev/null 2> /dev/null'.format(
        start=t_start,
        stop=t_stop,
        f_remote=self.remote_file_path,
        t_path=transfer_path)
    connectivity.Script.remote_exe(self.server.ssh_connection, shrink_cmd)
    # Change file permissions to allow transfer by a non-sudo user
    connectivity.Script.remote_exe(
        self.server.ssh_connection,
        "sudo chmod 644 {}".format(transfer_path))
    # Download the trace to the server directory
    log.debug("Download shrunk pcap trace")
    if not os.path.exists(local_path):
        os.makedirs(local_path)
    local_path = os.path.join(
        local_path,
        '{time}-{server}.pcap'.format(
            time=filter['time']['stop'].strftime("%Y-%m-%dT%X"),
            server=self.server.key))
    self.server.ssh_connection.open_ssh_connexion()
    trans = connectivity.Transfer(self.server.ssh_connection.transport)
    trans.get(transfer_path, local_path)
    # Remove the transfer file
    rm_transfer_path_cmd = "sudo rm -rf {file}".format(file=transfer_path)
    connectivity.Script.remote_exe(self.server.ssh_connection,
                                   rm_transfer_path_cmd)
    # Close the connection
    self.server.ssh_connection.close_ssh_connexion()
    # Read the trace
    log.debug("Read pcap trace")
    PcapFileSniffer.get_packets(self, filter, local_path)
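# Illustrative sketch (not part of the module): how the editcap time-window
# command is assembled. editcap's -A/-B options keep only the packets whose
# timestamps fall after the start time and before the stop time. The paths
# used here are hypothetical.
def _demo_shrink_cmd():
    from datetime import datetime
    start = datetime(2017, 1, 1, 12, 0, 0)
    stop = datetime(2017, 1, 1, 12, 5, 0)
    cmd = 'editcap -v -A "{start}" -B "{stop}" {src} {dst}'.format(
        start=start.strftime("%Y-%m-%d %X"),
        stop=stop.strftime("%Y-%m-%d %X"),
        src="/var/log/capture.pcap",
        dst="/tmp/shrunk.pcap")
    print(cmd)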
def destroy(self):
    shutil.rmtree(self.directory)
    self.clean()
    log.debug("Destroy user %s" % self.uuid)
def exe(self):
    # TODO fast remote execution (one line). Don't use remote transfer in tmp script
    # Define script name
    path_script = '/tmp/brorig_{0!s}'.format(
        base64.b32encode(uuid.uuid4().bytes)[:26])
    # Create local script file
    if not self.file_name:
        with open(path_script, 'w') as f:
            f.write(self.code)
    chan = None
    # Transfer script to remote server if needed
    if self.exe_remote:
        self.connection.open_ssh_connexion()
        chan = self.connection.connection.get_transport().open_session()
        t = Transfer(self.connection.transport)
        t.put(self.file_name if self.file_name else path_script, path_script)
    # Script execution
    cmd = '{sudo}{interpret} {script} {args}'.format(
        sudo=("sudo " if self.sudo else ""),
        interpret=self.interpret,
        script=path_script,
        args=" ".join([("-" if len(str(arg)) == 1 else "--") + str(arg) +
                       " " + str(val)
                       for arg, val in self.args.iteritems()]))
    log.info("{1} code execution: {0:.100}".format(
        self.code, "Remote" if self.exe_remote else "Local"))
    log.debug("Launch {1} command: {0}".format(
        cmd, "remote" if self.exe_remote else "local"))
    if self.exe_remote:
        # Remote execution
        chan.exec_command(cmd)
        stdout = chan.makefile('r', -1)
        stderr = chan.makefile_stderr('r', -1)
        self.err = stderr.read()
        self.out = stdout.read()
        return_code = chan.recv_exit_status()
    else:
        # Local execution
        p = subprocess.Popen(cmd,
                             stdout=subprocess.PIPE,
                             stderr=subprocess.PIPE,
                             shell=True)
        self.out, self.err = p.communicate()
        return_code = p.returncode
    # Remove script (the local temporary copy only exists when the code was
    # written out above)
    if not self.file_name:
        os.remove(path_script)
    if self.exe_remote:
        self.connection.connection.exec_command(
            "rm -rf {}".format(path_script))
        # Close remote connection
        self.connection.close_ssh_connexion()
    # Error handler
    if return_code != 0 and not self.ignore_error:
        raise Exception('{1} script execution error: {0}'.format(
            self.err, "Remote" if self.exe_remote else "Local"))
    return self.out
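# Illustrative sketch (not part of the module): how the argument string for
# the command line is built. One-character option names get a single dash,
# longer names a double dash. The args used here are hypothetical.
def _demo_args_format():
    args = {"v": "", "output": "/tmp/out.txt"}
    rendered = " ".join([("-" if len(str(a)) == 1 else "--") + str(a) +
                         " " + str(v) for a, v in args.items()])
    print(rendered)  # e.g. "-v  --output /tmp/out.txt" (order may vary)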
def get(self, remote_path, local_path):
    log.debug("Download file from server %s to local %s" %
              (remote_path, local_path))
    self.sftp.get(remote_path, local_path)
def close_ssh_connexion(self):
    self.transport.close()
    self.connection.close()
    del self.connection
    del self.transport
    log.debug("Remote connection (%s) closed" % self.host)
def group_packet(self):
    """
    Group packets that share tags.

    Algorithm:
        - Create a bipartite graph where one side is the packet set and the
          other side the tag set. Each link is the relation between a
          packet and a tag.
        - Compute a Depth First Search to find all packet groups. Indeed,
          each connected component is a flow.

    :return:
    """
    log.debug("Computing packet group...")
    packets = [p for n in self.net.nodes for p in n.packet_list()] + \
              [p for l in self.net.links for p in l.packet_list()]
    # Compute network graph
    n_packet = []
    n_packet_value = dict()
    n_tag = []
    n_tag_value = dict()
    l = []
    for p in packets:
        pn = dict(group=-1, value=p, links=[])
        for tag in p.tags():
            if tag not in n_tag_value:
                n_tag_value[tag] = len(n_tag)
                l.append((len(n_tag), len(n_packet)))
                n_tag.append(dict(group=-1, value=tag, links=[len(l) - 1]))
            else:
                l.append((n_tag_value[tag], len(n_packet)))
                n_tag[n_tag_value[tag]]['links'].append(len(l) - 1)
            pn['links'].append(len(l) - 1)
        n_packet_value[p] = len(n_packet)
        n_packet.append(pn)

    def dfs(node_index, is_packet, group_num):
        node = n_packet[node_index] if is_packet else n_tag[node_index]
        if node['group'] >= 0:
            # End of recursion: node already visited
            return
        # Tag the node with its group
        if is_packet:
            n_packet[node_index]['group'] = group_num
        else:
            n_tag[node_index]['group'] = group_num
        # Recurse on the other side of the bipartite graph
        for link in node['links']:
            dfs(l[link][int(not is_packet)], not is_packet, group_num)

    # Compute Depth First Search to get the connected components
    log.debug("Doing depth first search on tag packet graph...")
    group_id = 0
    for idx, node in enumerate(n_packet):
        if node['group'] < 0:  # node without group
            dfs(idx, True, group_id)
            group_id += 1
    # Convert graph to output structure
    log.debug("Convert tag packet graph into packet group")
    tag_group = dict()
    for n in n_packet:
        if n['group'] not in tag_group:
            tag_group[n['group']] = dict(tags=[],
                                         set=[],
                                         uuid=base64.b32encode(
                                             uuid.uuid4().bytes)[:26])
        tag_group[n['group']]['set'].append(n['value'])
    for n in n_tag:
        tag_group[n['group']]['tags'].append(n['value'])
    log.debug("Saving packet group computation")
    self.net.stat['packet_group'] = tag_group
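# Illustrative sketch (not part of the module): grouping packets that share
# tags amounts to finding connected components in the packet/tag bipartite
# graph. A compact standalone analogue with mocked packets:
def _demo_group_packet():
    packets = {"p1": ["http", "flow42"], "p2": ["flow42"], "p3": ["dns"]}
    # Build adjacency: tag -> packets carrying it
    by_tag = {}
    for pkt, tags in packets.items():
        for tag in tags:
            by_tag.setdefault(tag, []).append(pkt)
    # Depth first search over the bipartite structure
    group = {}

    def dfs(pkt, gid):
        if pkt in group:
            return
        group[pkt] = gid
        for tag in packets[pkt]:
            for other in by_tag[tag]:
                dfs(other, gid)

    gid = 0
    for pkt in packets:
        if pkt not in group:
            dfs(pkt, gid)
            gid += 1
    print(group)  # p1 and p2 share "flow42" -> same group; p3 stands alone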