def parse_to_json(self):
    if self.artifact_type is not None:
        try:
            self.parsed_data = self.artifact_type.parse_to_json(self.raw_data)
        except Exception as e:
            es = ElasticUtil()
            es.log_error("ParseError: " + e.message)
def zmqsender(msg):
    try:
        context = zmq.Context()
        zmq_socket = context.socket(zmq.PUSH)
        zmq_socket.bind("tcp://{}:{}".format(ZMQ_SERVER_IP, ZMQ_WORKER_PORT))
        zmq_socket.send(msg)
        zmq_socket.close()
    except Exception as e:
        es = ElasticUtil()
        es.log_error("ZMQSender SendError: " + e.message)
def compare_threads(artifacts_list, addr):
    print "Compare threads"
    try:
        # for artifact in artifacts_list:
        #     if artifact.artifact_type is UserThreads:
        #         list1 = artifact.parsed_data
        #     elif artifact.artifact_type is KernelThreads:
        #         list2 = artifact.parsed_data
        list1 = artifacts_list["userThreads"].parsed_data
        list2 = artifacts_list["kernelThreads"].parsed_data

        # Get a list of threads that are not in both lists
        diff_list = [i for i in list1 + list2 if i not in list1 or i not in list2]

        # Send each diff to Elastic, noting in which list the thread was found
        # and from which list it was missing
        es_util = ElasticUtil()
        if len(diff_list) != 0:
            for tup in diff_list:
                if tup in list1:
                    doc = {
                        "timestamp": datetime.utcnow(),
                        "IP": addr,
                        "UserThread.PID": tup[0],
                        "UserThread.TID": tup[-1],
                        "inUser:": True,
                        "inKernel:": False
                    }
                    # Connection successful
                    es_util.send_to_elastic("gymic-comparethreads", "ThreadCompare", doc)
                elif tup in list2:
                    doc = {
                        "timestamp": datetime.utcnow(),
                        "IP": addr,
                        "KernelThread.PID": tup[0],
                        "KernelThread.TID": tup[-1],
                        "inUser:": False,
                        "inKernel:": True
                    }
                    # Connection successful
                    es_util.send_to_elastic("gymic-comparethreads", "ThreadCompare", doc)
    except Exception as e:
        es = ElasticUtil()
        es.log_error("CompareThread Error: " + e.message)
def tcpserver():
    try:
        # Initialize server
        sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
        sock.bind((TCP_SERVER_IP, TCP_SERVER_PORT))
        sock.listen(10)
    except Exception as e:
        es = ElasticUtil()
        es.log_error("TCPServer BindError: " + e.message)
    completed = ""
    # Infinite loop to receive data from clients
    while True:
        conn, addr = sock.accept()
        try:
            while True:
                data = conn.recv(65536)
                completed = completed + data
                if "End" in completed:
                    msg = {"data": completed[:completed.find("End")], "addr": addr[0]}
                    # Push the data to the ZMQ workers
                    zmqsender(json.dumps(msg))
                    completed = completed[completed.find("End") + 7:]
                else:
                    break
        except Exception as e:
            es = ElasticUtil()
            es.log_error("TCPServer ReceiveError: " + e.message)
        finally:
            conn.close()
def main():
    try:
        # Initialize the zmqserver and worker threads and start them
        thread_zmqserver = threading.Thread(target=zmqserver)
        thread_zmqserver.daemon = True
        thread_zmqserver.start()
        workers = []
        for i in xrange(NUM_OF_WORKERS):
            worker = threading.Thread(target=zmqworker)
            worker.daemon = True
            workers.append(worker)
            worker.start()
    except Exception as e:
        es = ElasticUtil()
        es.log_error("Threads StartError: " + e.message)
    tcpserver()
def send_to_elastic(parsed_data, addr):
    es_util = ElasticUtil()
    for line in parsed_data:
        try:
            doc = {
                "timestamp": datetime.utcnow(),
                "IP": addr,
                "UserNetstat.Protocol": line[0],
                "UserNetstat.LocalAddress": line[1],
                "UserNetstat.LocalPort": line[2],
                "UserNetstat.RemoteAddress": line[3],
                "UserNetstat.RemotePort": line[4],
                "UserNetstat.State": line[5],
                "UserNetstat.PID": line[6],
                "UserNetstat.ProgramName": line[-1]
            }
            # Connection successful
            es_util.send_to_elastic("gymic-usernetstat", "UserNetstat", doc)
        except Exception as e:
            # Connection unsuccessful.
            es_util.log_error("UserNetstat send error: " + e.message)
def searchForMiner(artifacts_list, addr):
    print "Miner finder"
    try:
        procs = artifacts_list["userProcess"].parsed_data
        net = artifacts_list["userNetwork"].parsed_data

        # Get process names for communicating processes
        netProcs = []
        for line in net:
            netProcs.append(line[-1])

        es_util = ElasticUtil()

        # Call model building method (model and encoders are cached in globals)
        global mlModel
        global lableEncoderProc
        global lableEncoderUser
        if mlModel is None:
            created_model = minerMLMode_createModel()
            mlModel = created_model[0]
            lableEncoderProc = created_model[1]
            lableEncoderUser = created_model[2]

        # Inspect each process
        for proc in procs:
            if proc[2] != "":
                # Set communication value
                comm = 50 if proc[2] in netProcs else 0
                try:
                    if minerMLMode_inspect(mlModel, lableEncoderProc, lableEncoderUser, proc, comm) == 1:
                        print "ML FOUND PROC : " + proc[2]
                        doc = {
                            "timestamp": datetime.utcnow(),
                            "IP": addr,
                            "UserProcesses.PID": proc[0],
                            "UserProcesses.CPU": proc[1],
                            "UserProcesses.ProcessName": proc[2]
                        }
                        es_util.send_to_elastic("gymic-miner", "MinerFinder", doc)
                except Exception as e:
                    continue
    except Exception as e:
        es = ElasticUtil()
        es.log_error("MinerFinder Error: " + e.message)
        print "Miner: " + e.message
def zmqserver():
    context = zmq.Context()
    server_socket = context.socket(zmq.PULL)
    try:
        server_socket.bind("tcp://{}:{}".format(ZMQ_SERVER_IP, ZMQ_SERVER_PORT))
    except Exception as e:
        es = ElasticUtil()
        es.log_error("ZMQServer BindError: " + e.message)
    while True:
        try:
            while True:
                result = server_socket.recv_json()
                if result.has_key('worker_id'):
                    print "Worker {} finished printing".format(result["worker_id"])
        except Exception as e:
            es = ElasticUtil()
            es.log_error("ZMQServer ReceiveError: " + e.message)
def send_to_elastic(parsed_data, addr):
    es_util = ElasticUtil()
    for line in parsed_data:
        try:
            doc = {
                "timestamp": datetime.utcnow(),
                "IP": addr,
                "KernelProcesses.PID": line[0],
                "KernelProcesses.ProcessName": line[-1]
            }
            # Connection successful
            es_util.send_to_elastic("gymic-kernelprocesses", "KernelProcesses", doc)
        except Exception as e:
            # Connection unsuccessful.
            es_util.log_error("KernelProcesses send error: " + e.message)
def send_to_elastic(parsed_data, addr):
    es_util = ElasticUtil()
    for line in parsed_data:
        try:
            doc = {
                "timestamp": datetime.utcnow(),
                "IP": addr,
                "UserModules.ModuleName": line
            }
            # Connection successful
            es_util.send_to_elastic("gymic-usermodules", "UserModules", doc)
        except Exception as e:
            # Connection unsuccessful.
            es_util.log_error("UserModules send error: " + e.message)
def send_to_elastic(parsed_data, addr):
    es_util = ElasticUtil()
    for line in parsed_data:
        try:
            doc = {
                "timestamp": datetime.utcnow(),
                "IP": addr,
                "UserThreads.TID": line[-1],
                "UserThreads.PID": line[0]
            }
            # Connection successful
            es_util.send_to_elastic("gymic-userthreads", "UserThreads", doc)
        except Exception as e:
            # Connection unsuccessful.
            es_util.log_error("UserThreads send error: " + e.message)
def send_to_elastic(parsed_data, addr):
    es_util = ElasticUtil()
    for line in parsed_data:
        try:
            pid = line[0]
            name = line[2]
            cpu = line[1]
            user = line[3]
            doc = {
                "timestamp": datetime.utcnow(),
                "IP": addr,
                "UserProcesses.PID": pid,
                "UserProcesses.ProcessName": name,
                "UserProcesses.CPU": cpu,
                "UserProcesses.USER": user
            }
            # Connection successful
            es_util.send_to_elastic("gymic-userprocesses", "UserProcesses", doc)
        except Exception as e:
            # Connection unsuccessful.
            es_util.log_error("UserProcesses send error: " + e.message)
def zmqworker():
    # Initialize worker
    worker_id = random.randrange(1, 10005)
    print "Worker {0} has started.".format(worker_id)
    context = zmq.Context()
    pull_socket = context.socket(zmq.PULL)
    try:
        pull_socket.connect("tcp://{}:{}".format(ZMQ_SERVER_IP, ZMQ_WORKER_PORT))
    except Exception as e:
        es = ElasticUtil()
        es.log_error("ZMQWorker PullConnectError: " + e.message)
    push_socket = context.socket(zmq.PUSH)
    try:
        push_socket.connect("tcp://{}:{}".format(ZMQ_SERVER_IP, ZMQ_SERVER_PORT))
    except Exception as e:
        es = ElasticUtil()
        es.log_error("ZMQWorker PushConnectError: " + e.message)
    while True:
        try:
            while True:
                # Wait for next request from client
                msg_json = pull_socket.recv()
                msg_dic = json.loads(msg_json)
                msg = msg_dic.get("data")
                addr = msg_dic.get("addr")
                print "Worker {0} Received request: {1}".format(worker_id, msg)
                if msg is not None:
                    # Code for actual work
                    result = {"worker_id": worker_id, "data": msg}
                    # Check the data type and act accordingly (parse and analyze data and send it to elastic)
                    if msg.startswith("gymic_finish_thread"):
                        compare_threads(output_dict[addr], addr)
                    elif msg.startswith("gymic_finish_proc"):
                        compare_proc(output_dict[addr], addr)
                        try:
                            net = output_dict[addr]["userNetwork"]
                            searchForMiner(output_dict[addr], addr)
                        except KeyError:
                            pass
                    elif msg.startswith("gymic_finish_mod"):
                        compare_modules(output_dict[addr], addr)
                    elif msg.startswith("gymic_finish_net"):
                        try:
                            proc = output_dict[addr]["userProcess"]
                            searchForMiner(output_dict[addr], addr)
                        except KeyError:
                            pass
                    if "finish" not in msg:
                        artifact = Artifact(msg, addr)
                        artifact.parse_to_json()
                        artifact.send_to_elastic()
                        # Add to output dictionary
                        if artifact.artifact_type is not None:
                            if output_dict.has_key(addr):
                                try:
                                    output_dict[addr][artifact.artifact_header] = artifact
                                except KeyError:
                                    pass
                            else:
                                output_dict[addr] = {}
                                output_dict[addr][artifact.artifact_header] = artifact
                    push_socket.send_json(result)
        except Exception as e:
            es = ElasticUtil()
            es.log_error("ZMQWorker ReceiveError: " + e.message)
def recv_dump(ip):
    try:
        output_dir = os.path.join(os.getcwd(), 'Dumps')
        if not os.path.exists(output_dir):
            os.mkdir(output_dir)
        output_path = os.path.join(
            output_dir,
            datetime.now().strftime("%Y-%m-%d %H-%M-%S") + " - " + ip + ".lime")
        with open(output_path, 'wb') as out:
            try:
                s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
                s.connect((ip, LIME_PORT))
            except Exception as e:
                es = ElasticUtil()
                es.log_error("DumpReceive ConnectError: " + e.message)
            while True:
                mem_data = s.recv(1024)
                if not mem_data:
                    break
                out.write(mem_data)
            s.close()
        es = ElasticUtil()
        es.log("Successfully dumped memory to " + output_path)
    except Exception as e:
        es = ElasticUtil()
        es.log_error("DumpReceive CreateError: " + e.message)
def compare_proc(artifacts_list, addr):
    print "Compare processes"
    try:
        list2 = list1 = []
        diff_list = []
        irrelevant_processes = [
            "ksoftirqd", "rcu_sched", "insmod", "system-udevd", "ps", "sh",
            "lsched", "rcuos", "rcuob", "user"
        ]

        # Signal to Workstation to not take a dump
        # s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
        # s.connect((addr, LIME_PORT))
        # s.send("No")
        # s.close()
        # return

        list1 = artifacts_list["userProcess"].parsed_data
        list2 = artifacts_list["kernelProcesses"].parsed_data
        if not (len(list1) == 0 or len(list2) == 0):
            tempList = []
            # print "l1: " + str(len(list1))
            # print "l2: " + str(len(list2))
            for proc in list1:
                tempList.append((proc[0], proc[2]))

            # Get a list of processes that are not in both lists
            diff_list = [i for i in tempList + list2 if i not in tempList or i not in list2]
            print diff_list
            print "diff prev: " + str(len(diff_list))

            # Delete from the diff list processes that are known to legitimately differ
            for proc in list(diff_list):
                if proc[1] != '':
                    for p in irrelevant_processes:
                        if p in proc[1]:
                            diff_list.remove(proc)
                            break
                else:
                    diff_list.remove(proc)
            print diff_list
            print "diff new: " + str(len(diff_list))

            # if not is_dumped:
            # Create a socket so the server will memdump the workstation if the list isn't empty
            try:
                s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
                s.connect((addr, LIME_PORT))
                if len(diff_list) == 0:
                    s.send("No")
                    s.close()
                else:
                    s.send("Yes")
                    s.close()
                    # is_dumped = True
                    recv_dump(addr)
            except:
                pass

            # Send each diff to Elastic, noting in which list the process was found
            # and from which list it was missing
            es_util = ElasticUtil()
            for tup in diff_list:
                if tup in tempList:  # diff entries from the user list are (PID, name) tuples
                    doc = {
                        "timestamp": datetime.utcnow(),
                        "IP": addr,
                        "UserProcesses.PID": tup[0],
                        "UserProcesses.ProcessName": tup[-1],
                        "inUser:": True,
                        "inKernel:": False
                    }
                    # Connection successful
                    es_util.send_to_elastic("gymic-compareprocess", "ProcessCompare", doc)
                elif tup in list2:
                    doc = {
                        "timestamp": datetime.utcnow(),
                        "IP": addr,
                        "KernelProcesses.PID": tup[0],
                        "KernelProcesses.ProcessName": tup[-1],
                        "inUser:": False,
                        "inKernel:": True
                    }
                    # Connection successful
                    es_util.send_to_elastic("gymic-compareprocess", "ProcessCompare", doc)
            print "Sent diffs"
    except Exception as e:
        es = ElasticUtil()
        es.log_error("CompareProc Error: " + e.message)
        print e.message
def compare_modules(artifacts_list, addr):
    print "Compare modules"
    try:
        # Signal to Workstation to not take a dump
        # s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
        # s.connect((addr, LIME_PORT))
        # s.send("No")
        # s.close()
        # return

        list3 = list2 = list1 = []
        list1 = artifacts_list["userModule"].parsed_data
        list2 = artifacts_list["kernelModule"].parsed_data
        list3 = artifacts_list["sysModule"].parsed_data

        # Get a list of modules that are not in both lists
        diff_list1 = [i for i in list1 + list2 if i not in list1 or i not in list2]
        diff_list2 = [i for i in list1 + list3 if i not in list1 or i not in list3]
        diff_list = list(set(diff_list1 + diff_list2))

        # if not is_dumped:
        es_util = ElasticUtil()
        # Create a socket so the server will memdump the workstation if the list isn't empty
        try:
            s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
            s.connect((addr, LIME_PORT))
            if len(diff_list2) == 0:
                s.send("No")
                s.close()
            else:
                s.send("Yes")
                s.close()
                # is_dumped = True
                recv_dump(addr)
        except:
            pass

        # Send each diff to Elastic, noting in which lists the module was found
        # and from which lists it was missing
        for module in diff_list:
            if module:
                if module.isalpha():
                    inUser = False
                    inKernel = False
                    inSys = False
                    if module in list1:
                        inUser = True
                    if module in list2:
                        inKernel = True
                    if module in list3:
                        inSys = True
                    doc = {
                        "timestamp": datetime.utcnow(),
                        "IP": addr,
                        "KernelModules.ModuleName": module,
                        "inUser:": inUser,
                        "inKernel:": inKernel,
                        "inSys:": inSys
                    }
                    # Connection successful
                    es_util.send_to_elastic("gymic-comparemodule", "ModulesCompare", doc)
    except Exception as e:
        es = ElasticUtil()
        es.log_error("CompareModule Error: " + e.message)
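# --- Reference sketch (not part of the original source) ----------------------
# The functions above assume module-level imports and constants that are likely
# defined elsewhere in the project (ElasticUtil, Artifact and the miner ML
# helpers come from other project modules). The block below is a minimal sketch
# of those assumptions so the module can run standalone; every value shown is a
# placeholder, not the project's real configuration.
import json
import os
import random
import socket
import threading
from datetime import datetime

import zmq

ZMQ_SERVER_IP = "127.0.0.1"    # placeholder
ZMQ_SERVER_PORT = 5558         # placeholder
ZMQ_WORKER_PORT = 5557         # placeholder
TCP_SERVER_IP = "0.0.0.0"      # placeholder
TCP_SERVER_PORT = 5005         # placeholder
LIME_PORT = 4444               # placeholder
NUM_OF_WORKERS = 4             # placeholder

output_dict = {}               # per-client cache of parsed artifacts
mlModel = None                 # lazily built by searchForMiner
lableEncoderProc = None
lableEncoderUser = None

# Minimal entry point, assuming this module is meant to be run directly as the
# collection server.
if __name__ == "__main__":
    main()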