Example No. 1
    def _disconnect(self):
        """ Close up shop """

        logger.debug("Disconnecting from disk sensor.")

        self.connected = False

        # Close our socket
        if self.SOCK is not None:
            logger.debug("Closing socket.")
            self.SOCK.close()
            self.SOCK = None

        # Are we using a threaded reader?
        if self.packet_reader is not None:
            logger.debug("Killing packet reader thread.")
            try:
                logger.debug("Trying to kill reader thread.")
                self.packet_reader.stop()
                self.packet_reader = None
                logger.debug("Reader thread killed.")
            except:
                G.print_traceback()
                pass

        # Close our threaded process for reading?
        if self.read_queue is not None:
            logger.debug("Closing queue.")
            self.read_queue.close()
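
A note on the reader teardown: a minimal sketch of the kind of thread that _disconnect() stops is shown below. This is an assumption about the design; the actual packet_reader class is not included on this page, and the names PacketReader and _running are illustrative only.

import threading

class PacketReader(threading.Thread):
    """Hypothetical reader thread with the stop() interface that
    _disconnect() expects: read from a socket until stopped, pushing
    data onto a queue."""

    def __init__(self, sock, queue):
        threading.Thread.__init__(self)
        self.daemon = True
        self._sock = sock
        self._queue = queue
        self._running = True

    def run(self):
        while self._running:
            try:
                data = self._sock.recv(4096)
            except Exception:
                break  # the socket was closed out from under us
            if not data:
                break
            self._queue.put(data)

    def stop(self):
        # Flip the flag and close the socket so a blocking recv() returns.
        self._running = False
        self._sock.close()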
Example No. 2
def pack_analysis_list(analysis_list):
    """
        Given our dict of active analyses, pack up a minimal version to
        send to our master controller
    """

    # Init our protocol buffer
    analyses = analysis_pb2.AnalysisList()

    try:
        # Add required elements
        for a in analysis_list:
            # Get config
            ac = analysis_list[a]
            # Create a new object
            a2 = analyses.analysis.add()
            # Fill values
            a2.analysis_id = a
            a2.lophi_name = ac.lophi_config.name
            a2.machine_type = ac.machine.MACHINE_TYPE
            a2.machine_name = ac.machine.config.name
            a2.volatility_profile = ac.lophi_config.volatility_profile
            a2.created = ac.created
    except:
        G.print_traceback()

    # Serialize and return
    return analyses.SerializeToString()
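
On the receiving end, the master controller can reverse this with the stock protobuf API. A sketch of that side, assuming only the AnalysisList message and the fields populated above (ParseFromString is the standard protobuf deserializer):

def unpack_analysis_list(data):
    """Inverse of pack_analysis_list(): parse the serialized bytes back
    into an AnalysisList and index the entries by analysis_id."""
    analyses = analysis_pb2.AnalysisList()
    analyses.ParseFromString(data)
    unpacked = {}
    for a in analyses.analysis:
        unpacked[a.analysis_id] = a
    return unpacked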
Example No. 3
    def _disconnect(self):
        """ Close up shop """
        
        logger.debug("Disconnecting from disk sensor.")
        
        self.connected = False
        
        # Close our socket
        if self.SOCK is not None:
            logger.debug("Closing socket.")
            self.SOCK.close()
            self.SOCK = None

        # Are we using a threaded reader?
        if self.packet_reader is not None:
            logger.debug("Killing packet reader thread.")
            try:
                logger.debug("Trying to kill reader thread.")
                self.packet_reader.stop()
                self.packet_reader = None
                logger.debug("Reader thread killed.")
            except:
                G.print_traceback()
                pass
            
        # Close our threaded process for reading?
        if self.read_queue is not None:
            logger.debug("Closing queue.")
            self.read_queue.close()  
Example No. 4
def pack_analysis_list(analysis_list):
    """
        Given our dict of active analyses, pack up a minimal version to
        send to our master controller
    """

    # Init our protocol buffer
    analyses = analysis_pb2.AnalysisList()

    try:
        # Add required elements
        for a in analysis_list:
            # Get config
            ac = analysis_list[a]
            # Create a new object
            a2 = analyses.analysis.add()
            # Fill values
            a2.analysis_id = a
            a2.lophi_name = ac.lophi_config.name
            a2.machine_type = ac.machine.MACHINE_TYPE
            a2.machine_name = ac.machine.config.name
            a2.volatility_profile = ac.lophi_config.volatility_profile
            a2.created = ac.created
    except:
        G.print_traceback()

    # Serialize and return
    return analyses.SerializeToString()
Example No. 5
    def run(self):
        """
            Run forever grabbing data from the VM and forwarding it to listening
            clients
        """
        # Get our meta data which is always the first packet sent
        meta = MetaHeader()
        meta_data = self._conn.recv(len(meta))
        if len(meta_data) == 0:
            logger.debug("VM Disconnected.")
            self._conn.close()
            return

        meta._unpack(meta_data)

        #logger.debug("Got meta data")
        #logger.debug(meta)

        # Store our filename for this VM
        # NOTE: We must strip the null chars off or comparisons will fail!
        filename = meta.filename.strip("\x00")

        # Create our sensor packet, and save its default size
        sensor_packet = DiskSensorPacket()
        sensor_header_size = len(sensor_packet)

        # Read packets forever
        while True:

            # Read and unpack our header
            header_data = self._conn.recv(sensor_header_size)
            if len(header_data) == 0:
                #logger.debug("VM Disconnected.")
                break

            sensor_packet._unpack(header_data)

            # Get the accompanying data
            try:
                sensor_packet.data = self._conn.recv(sensor_packet.size)
            except:
                print sensor_packet
                G.print_traceback()

            if len(sensor_packet.data) == 0:
                logger.debug("VM Disconnected.")
                self._conn.close()
                return

            if filename in disk_stream_dict:
                #logger.debug("Found %s in dict."%filename)
                for queue in disk_stream_dict[filename]:
                    queue.put(sensor_packet)
Example No. 6
    def run(self):
        """
            Run forever grabbing data from the VM and forwarding it to listening
            clients 
        """
        # Get our meta data which is always the first packet sent
        meta = MetaHeader()
        meta_data = self._conn.recv(len(meta))
        if len(meta_data) == 0:
            logger.debug("VM Disconnected.")
            self._conn.close()
            return

        meta._unpack(meta_data)

        #logger.debug("Got meta data")
        #logger.debug(meta)

        # Store our filename for this VM
        # NOTE: We must strip the null chars off or comparisons will fail!
        filename = meta.filename.strip("\x00")

        # Create our sensor packet, and save its default size
        sensor_packet = DiskSensorPacket()
        sensor_header_size = len(sensor_packet)

        # Read packets forever
        while True:

            # Read and unpack our header
            header_data = self._conn.recv(sensor_header_size)
            if len(header_data) == 0:
                #logger.debug("VM Disconnected.")
                break

            sensor_packet._unpack(header_data)

            # Get the accompanying data
            try:
                sensor_packet.data = self._conn.recv(sensor_packet.size)
            except:
                print sensor_packet
                G.print_traceback()

            if len(sensor_packet.data) == 0:
                logger.debug("VM Disconnected.")
                self._conn.close()
                return

            if filename in disk_stream_dict:
                #logger.debug("Found %s in dict."%filename)
                for queue in disk_stream_dict[filename]:
                    queue.put(sensor_packet)
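
One caveat with the loops above: socket.recv(n) may legally return fewer than n bytes on a stream socket, so a fixed-size header read can tear across calls under load. A defensive exact-read helper, as a sketch (this is not LO-PHI's own helper), would look like:

def recv_exact(conn, length):
    """Read exactly length bytes from a stream socket, or return ""
    if the peer disconnects mid-read."""
    chunks = []
    remaining = length
    while remaining > 0:
        chunk = conn.recv(remaining)
        if not chunk:
            return ""
        chunks.append(chunk)
        remaining -= len(chunk)
    return "".join(chunks)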
Example No. 7
def import_analysis_scripts(path):
    """
        This is used to import analysis script files and extract their classes.
        
        @path: Directory on the disk that contains module files that all
                contain subclasses of LophiAnalysis
                
        @return: dict of analysis classes (dict[name] = Class)
    """

    # Append this path to import from it
    sys.path.append(path)

    analysis_classes = {}
    # scan our path for suitable modules to import
    if os.path.exists(path) and os.path.isdir(path):
        for dirpath, _dirnames, filenames in os.walk(path):
            for filename in filenames:
                if filename.endswith(".py") and not filename.startswith("_"):

                    # get our full filename
                    path_filename = os.path.join(dirpath, filename)

                    # get our module name from the filename
                    module = filename[:-3]

                    logger.debug("Extracting analyses from %s..." % module)

                    try:
                        tmp_module = importlib.import_module(module)
                        analysis = extract_analysis(tmp_module)
                    except:
                        logger.error("Could not import module: %s" % module)
                        G.print_traceback()
                        continue

                    if analysis is not None:
                        if "NAME" in analysis.__dict__:
                            analysis_classes[analysis.NAME] = (analysis,
                                                               path_filename)
                        else:
                            logger.warning(
                                "Found analysis with no NAME attribute")
                            analysis_classes[analysis.__name__] = (
                                analysis, path_filename)

    return analysis_classes
Example No. 8
def import_analysis_scripts(path):
    """
        This is used to import analysis script files and extract their classes.
        
        @path: Directory on the disk that contains module files that all
                contain subclasses of LophiAnalysis
                
        @return: dict of analysis classes (dict[name] = Class)
    """

    # Append this path to import from it
    sys.path.append(path)

    analysis_classes = {}
    # scan our path for suitable modules to import
    if os.path.exists(path) and os.path.isdir(path):
        for dirpath, _dirnames, filenames in os.walk(path):
            for filename in filenames:
                if filename.endswith(".py") and not filename.startswith("_"):

                    # get our full filename
                    path_filename = os.path.join(dirpath, filename)

                    # get our module name from the filename
                    module = filename[:-3]

                    logger.debug("Extracting analyses from %s..." % module)

                    try:
                        tmp_module = importlib.import_module(module)
                        analysis = extract_analysis(tmp_module)
                    except:
                        logger.error("Could not import module: %s" % module)
                        G.print_traceback()
                        continue

                    if analysis is not None:
                        if "NAME" in analysis.__dict__:
                            analysis_classes[analysis.NAME] = (analysis, path_filename)
                        else:
                            logger.warning("Found analysis with no NAME attribute")
                            analysis_classes[analysis.__name__] = (analysis, path_filename)

    return analysis_classes
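
The extract_analysis() helper called in both versions above is not defined on this page. A hypothetical sketch, assuming a LophiAnalysis base class is in scope (the docstring says the modules contain its subclasses), could scan the imported module for subclasses:

import inspect

def extract_analysis(module):
    # Hypothetical stand-in for the real extract_analysis(); LophiAnalysis
    # is assumed to be importable from the framework, which this page does
    # not show. Return the first subclass defined in the module, else None.
    for _name, obj in inspect.getmembers(module, inspect.isclass):
        if issubclass(obj, LophiAnalysis) and obj is not LophiAnalysis:
            return obj
    return None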
Example No. 9
    def run(self):
        """ Loop until we fail relaying objects """

        # Set our handler to close gracefully        
        G.set_exit_handler(self.cleanup)

        if G.VERBOSE:
            print "Relaying data from socket to queue."

        while self.RUNNING:

            # Try to unpack it
            try:
                # Get our data
                data = G.read_socket_data(self.SOCK)

                # Unpack our sensor output
                data = ProtoBuf.unpack_sensor_output(data)

            except EOFError:
                if G.VERBOSE:
                    print "RemoteQueue: Looks like our socket closed."

                break
            except:
                # Just die!
                if self.RUNNING:
                    print "ERROR/RemoteQueue: Could not unpickle network data."

                break

            # update our machine name to indicate its origin
            data['MACHINE'] = self.address[0] + "-" + data['MACHINE']

            # Write the data to our queue, if we can
            try:
                self.OUTPUT_QUEUE.put(data, False)
            except:
                if self.RUNNING:
                    print "ERROR/RemoteQueue: Could not write to output queue."
                G.print_traceback()
                break

        # Close socket
        self.SOCK.close()
Example No. 10
    def run(self):
        """ Loop until we fail relaying objects """

        # Set our handler to close gracefully
        G.set_exit_handler(self.cleanup)

        if G.VERBOSE:
            print "Relaying data from socket to queue."

        while self.RUNNING:

            # Try to unpack it
            try:
                # Get our data
                data = G.read_socket_data(self.SOCK)

                # Unpack our sensor output
                data = ProtoBuf.unpack_sensor_output(data)

            except EOFError:
                if G.VERBOSE:
                    print "RemoteQueue: Looks like our socket closed."

                break
            except:
                # Just die!
                if self.RUNNING:
                    print "ERROR/RemoteQueue: Could not unpickle network data."

                break

            # update our machine name to indicate its origin
            data['MACHINE'] = self.address[0] + "-" + data['MACHINE']

            # Write the data to our queue, if we can
            try:
                self.OUTPUT_QUEUE.put(data, False)
            except:
                if self.RUNNING:
                    print "ERROR/RemoteQueue: Could not write to output queue."
                G.print_traceback()
                break

        # Close socket
        self.SOCK.close()
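
G.read_socket_data() is not shown here, but the except EOFError branch suggests length-prefixed framing that raises EOFError once the socket closes. A sketch under that assumption (the real framing in the G module may differ):

import struct

def read_socket_data(sock):
    """Sketch: read a 4-byte network-order length prefix, then exactly
    that many payload bytes, raising EOFError on disconnect."""
    header = sock.recv(4)
    if len(header) < 4:
        raise EOFError("Socket closed")
    (length,) = struct.unpack("!I", header)
    data = ""
    while len(data) < length:
        chunk = sock.recv(length - len(data))
        if not chunk:
            raise EOFError("Socket closed mid-message")
        data += chunk
    return data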
Example No. 11
    def load_analysis(self, tmp_name_file):

        tmp_name = os.path.basename(tmp_name_file).split(".")[0]
        AnalysisClass = None

        try:
            import importlib
            Analysis = importlib.import_module(tmp_name)

            os.remove(tmp_name_file)
            os.remove(tmp_name_file + "c")

            AnalysisClass = Configs.extract_analysis(Analysis)
        except:
            G.print_traceback()
            logger.error("Could not import received file.")

        return AnalysisClass
Example No. 12
    def memory_read(self, addr, length):
        """
            Read physical memory

            @param addr: Address to start reading from
            @param length: How much memory to read
        """
        # Check for sensor
        if not self._has_sensor("memory"):
            return None

        try:
            data = self.memory.read(addr, length)
            return data
        except:
            logger.error("Memory read failed. (Addr: 0x%x, Len: %d)" % (addr, length))
            G.print_traceback()
            return None
Example No. 13
    def load_analysis(self,tmp_name_file):
        
        tmp_name = os.path.basename(tmp_name_file).split(".")[0]
        AnalysisClass = None
        
        try:
            import importlib
            Analysis = importlib.import_module(tmp_name)
            
            os.remove(tmp_name_file)
            os.remove(tmp_name_file+"c")

            AnalysisClass = Configs.extract_analysis(Analysis)
        except:
            G.print_traceback()
            logger.error("Could not import received file.")
        
        return AnalysisClass
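
Importing by bare module name like this only works because the temporary file's directory is already on sys.path. A standalone sketch of the same pattern, including the Python 2 .pyc cleanup:

import importlib
import os
import sys

def load_module_from_file(path):
    """Put the file's directory on sys.path, import by basename, then
    remove the source and its compiled .pyc byproduct."""
    directory = os.path.dirname(path)
    module_name = os.path.basename(path).split(".")[0]
    if directory not in sys.path:
        sys.path.insert(0, directory)
    module = importlib.import_module(module_name)
    os.remove(path)
    if os.path.exists(path + "c"):
        os.remove(path + "c")
    return module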
Example No. 14
    def memory_read(self, addr, length):
        """
            Read physical memory
            
            @param addr: Address to start reading from
            @param length: How much memory to read
        """
        # Check for sensor
        if not self._has_sensor("memory"):
            return None

        try:
            data = self.memory.read(addr, length)
            return data
        except:
            logger.error("Memory read failed. (Addr: 0x%x, Len: %d)" %
                         (addr, length))
            G.print_traceback()
            return None
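
A usage sketch (machine stands in for whatever object exposes memory_read(); the memory sensor wiring is assumed to be in place):

# Read one 4 KB page of physical memory and hex-dump the first 16 bytes.
page = machine.memory_read(0x1000, 4096)
if page is not None:
    print " ".join("%02x" % ord(b) for b in page[:16])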
Example No. 15
    def get_machines(self):
        """ Get protocol buffer version of remote machines """
        while 1:
            try:
                logger.debug("Getting machine list for Controller/%s" %
                             self.name)

                # Get response
                cmd = LophiCommand(G.CTRL_CMD_PICKLE, args=["machines"])
                data = self.send_cmd(cmd)
                status = G.read_socket_data(self.SOCK)

                # Unpack our machine list
                #    (WARNING: This is a subset of the objects at the server)
                if data is not None:
                    self.machines = ProtoBuf.unpack_machine_list(data)
                else:
                    self.machines = []

                return status
            except:
                G.print_traceback()
                self.connect()
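
A usage sketch of the call above (client stands in for the controller proxy; the fields on each unpacked machine depend on the protobuf schema, which this page does not show):

status = client.get_machines()
print "Machine list status:", status
for machine in client.machines:
    print machine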
Example No. 16
    def run(self):
        """" 
            Run analysis and then continuously read and process commands from 
            our command queue.    
        """
        
        logger.info("Started LO-PHI analysis '%s'. (PID: %d)"%(self.NAME,
                                                               os.getpid()))
        
        COMMANDS = {
            #  Command                      Function
            G.CTRL_CMD_PAUSE        :   self.analysis_pause,
            G.CTRL_CMD_UNPAUSE      :   self.analysis_resume,
            G.CTRL_CMD_STOP         :   self.analysis_stop
            }
        
        # grab a machine from the queue if one wasn't explicitly set
        if self.machine_name is not None:
            self.machine = self.machine_list[self.machine_name]
            
        if self.machine is None:
            logger.error("No machine provided to analysis.")
            return False
        
        self.machine.ALLOCATED = self.id
                
        # Start our analysis
        logger.debug("Acquiring mutex and starting analysis...")
        with self.machine.MUTEX:
            
            # Put ourselves in the running pool
            if self.running_dict is not None and \
               self.lophi_analysis_id is not None:
                # Moved from queued to running
                self.running_dict['queued'].remove(self.lophi_analysis_id)
                self.running_dict['running'].append(self.lophi_analysis_id)
            
            if self.lophi_command is not None and \
               self.lophi_command.db_analysis_id is not None:
                try:
                    DB_analysis = DatastoreAnalysis(self.services_host)
                    DB_analysis.update_analysis_machine(
                        self.lophi_command.db_analysis_id, self.machine)
                    DB_analysis.update_analysis(
                        self.lophi_command.db_analysis_id,
                        "status",
                        G.JOB_RUNNING)
                    DB_analysis.update_analysis(
                        self.lophi_command.db_analysis_id,
                        "started",
                        time.time())
                except:
                    logger.error("Could not update the database with analysis info.")
                
            # Run the user-defined analysis
            try:
                # Set our machine to the proper profile
                if self.lophi_command is not None:
                    logger.debug("Setting machine profile...")
                    if self.lophi_command.volatility_profile is not None:
                        prof_status = self.machine.set_volatility_profile(
                            self.lophi_command.volatility_profile)
                    elif self.VOLATILITY_PROFILE is not None:
                        prof_status = self.machine.set_volatility_profile(
                            self.VOLATILITY_PROFILE)
                    if not prof_status:
                        err = "Could not set profile (%s) for machine (%s)."%(
                                        self.machine.config.volatility_profile,
                                        self.machine.config.name)
                        logger.error(err)

                #
                #    Run the actual analysis
                #
                self.analysis_start()
                
                if self.lophi_command is not None and \
                   self.lophi_command.db_analysis_id is not None:
                    try:
                        DB_analysis.update_analysis(
                            self.lophi_command.db_analysis_id,
                            "status",
                            G.JOB_DONE)
                    except:
                        logger.warn("Could not update the database with analysis info.")

            except:
                logger.error("Analysis failed to start!")
                
                G.print_traceback()
                
                if self.lophi_command is not None and \
                   self.lophi_command.db_analysis_id is not None:
                    try:
                        DB_analysis.update_analysis(
                            self.lophi_command.db_analysis_id,
                            "error",
                            G.get_traceback())
                        DB_analysis.update_analysis(
                            self.lophi_command.db_analysis_id,
                            "status",
                            G.JOB_FAILED)
                    except:
                        logger.warn("Could not update the database with analysis info.")
                
                self.CONTINUE_EXECUTION = False
                # Try to stop anything that may have started.
                try:
                    self.analysis_stop()
                except:
                    pass

            # Wait for output to start returning, and handle appropriately
            while False and self.CONTINUE_EXECUTION:
                
                logger.debug("Waiting for cmd")
                
                command = self.command_queue.get()
                
                logger.debug("Got: %s" % command)
                
                # Split up our command
                cmd = command.rstrip().split(" ")

                # See if it's valid command
                if cmd[0] not in COMMANDS.keys():
                    logger.error("Got invalid command: %s" % command)
                else:
                    logger.debug("Executing %s"%cmd[0])
                    try:
                        COMMANDS[cmd[0]](command)
                    except:
                        G.print_traceback()
                    
                    if cmd[0] == G.CTRL_CMD_STOP or cmd[0] == G.CTRL_CMD_KILL:
                        break
                    
        # Mark analysis completion time
        try:
            DB_analysis.update_analysis(self.lophi_command.db_analysis_id, 
                                    "completed",
                                    time.time())
        except:
            logger.warn("Could not update the database with analysis info.")

        # Clean up and release machine
        logger.debug("Release machine back to queue.")
        self.machine.ALLOCATED = -1
        
        # remove from running analysis
        if self.running_dict is not None and self.lophi_analysis_id is not None:
            self.running_dict['running'].remove(self.lophi_analysis_id)
        
        # Did we get our machine from the queue? Put it back.
        if self.machine_queue is not None and self.machine_name is not None:
            self.machine_queue.put(self.machine_name)

        self.command_queue.close()
        logger.debug("LophiAnalysis stopped.")
        return True
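
The COMMANDS dict above is a dispatch table: command strings map straight to bound methods, which keeps the read-and-execute loop free of if/elif chains. A self-contained sketch of the same pattern:

def pause(cmd):
    print "pausing: %s" % cmd

def stop(cmd):
    print "stopping: %s" % cmd

DISPATCH = {"pause": pause, "stop": stop}

command = "stop now"
name = command.rstrip().split(" ")[0]
if name in DISPATCH:
    DISPATCH[name](command)
else:
    print "invalid command: %s" % command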
Example No. 17
    def run(self):
        """
            Figure out which type of host we are examining and call the 
            appropriate function.
        """

        from lophi_semanticgap.disk.sata import SATAInterpreter
        from lophi_semanticgap.disk.sata_reconstructor import SATAReconstructor
        from lophi_semanticgap.disk.filesystem_reconstructor import SemanticEngineDisk

        logger.debug("DiskEngine Started.")

        # Scan in our starting point
        if self.machine.type == G.MACHINE_TYPES.PHYSICAL:
            if not self._check_disk_scan():
                logger.error(
                    "Analysis cannot continue without a valid scan file.")
                return

            disk_img = self.machine.config.disk_scan
        else:
            # Get our image name for the virtual HDD on this host
            image_name = self.machine.disk_get_filename()
            logger.debug("Scanning disk image (%s)..." % (image_name))

            if image_name is None:
                logger.error("No disk found for VM (%s)." %
                             self.machine.config.name)
                return

            if image_name.endswith("qcow2"):
                logger.warning(
                    "Got qcow2 image, scanning the base image, ensure that you reset the machine!"
                )
                disk_img = self.machine.config.disk_base
            else:
                disk_img = image_name

        # Setup our tmp file
        self.working_disk_img = os.path.join(
            G.DIR_ROOT, G.DIR_TMP, self.machine.config.name + "-disk.img.tmp")

        # Create a backup
        logger.debug("Copying %s to %s..." % (disk_img, self.working_disk_img))
        cmd = "cp --sparse=always %s %s" % (disk_img, self.working_disk_img)
        subprocess.Popen(cmd, shell=True, stdout=subprocess.PIPE).stdout.read()
        os.chmod(self.working_disk_img, 0755)

        # Set up our semantic bridge
        logger.info(
            "Parsing disk image %s into our semantic engine... (This may take a while)"
            % self.working_disk_img)
        semantic_engine = SemanticEngineDisk(self.working_disk_img)

        # SATA Interpreter
        sata = SATAInterpreter()

        # SATA Reconstructor (handles NCQ reordering)
        sata_reconstructor = SATAReconstructor(
            sector_size=G.SENSOR_DISK.DEFAULT_SECTOR_SIZE)

        # Get data forever and report it back
        self.RUNNING = True
        while self.RUNNING:

            # Accept commands
            try:
                cmd = self.command_queue.get(False).split(" ")
                logger.debug("Got cmd: %s" % cmd)
                if cmd[0] == G.CTRL_CMD_PAUSE:
                    logger.debug("Pausing analysis")
                    self.PAUSED = True
                    self.machine.disk._disconnect()
                if cmd[0] == G.CTRL_CMD_UNPAUSE:
                    logger.debug("Resuming Analysis")
                    self.PAUSED = False
                    self.machine.disk._connect()
                if cmd[0] == G.CTRL_CMD_KILL or cmd[0] == G.CTRL_CMD_STOP:
                    logger.debug("Got kill command")
                    self.RUNNING = False
                    self.machine.disk._disconnect()
                    break
            except:
                # Do nothing
                pass

            if self.PAUSED:
                time.sleep(1)
                continue

            # Get our packet
            try:
                data = self.machine.disk_get_packet()
                logger.debug("Got: %s" % data)
            except:
                G.print_traceback()
                logger.debug("Disk introspection socket closed.")
                break

            # Good data?
            if data is None:
                continue

            if self.machine.type == G.MACHINE_TYPES.PHYSICAL:
                lophi_packet = type('AnonClass', (object, ), {
                    "sata_header": None,
                    "sata_data": None
                })
                (lophi_packet.sata_header,
                 lophi_packet.sata_data) = sata.extract_sata_data(data)

                # deal with SATA NCQ reordering
                disk_sensor_pkts = sata_reconstructor.process_packet(
                    lophi_packet)
            else:
                disk_sensor_pkts = [data]

            # Process all of our disk packets
            if disk_sensor_pkts:
                for dsp in disk_sensor_pkts:
                    # Skip empty packets
                    if not dsp:
                        continue

                    try:

                        fs_operations = semantic_engine.get_access(
                            dsp.sector, dsp.num_sectors, dsp.disk_operation,
                            dsp.data)
                        self.parse_actions(time.time(), fs_operations)

                    except:
                        logging.exception(
                            "Encountered error while trying to bridge semantic gap for this disk access."
                        )


#             logger.debug("got actions %s"%actions)
            # Handle our output
#             self.parse_actions(actions)

        logger.debug("Disk analysis exiting...")
Example No. 18
def main(options):
    """
        This script will connect to the LO-PHI Disk Sensor and log all of the 
        activity to a dcap file containing the RAW data capture
    """
    
    # Should we automatically set an output dir?
    OUTPUT_DIR = options.output_dir
    if OUTPUT_DIR is None:
        OUTPUT_DIR = "lophi_data_"+datetime.datetime.now().strftime("%m%d")
        
    # Make sure we can create the output directory
    if not os.path.exists(OUTPUT_DIR):
        try:
            os.makedirs(OUTPUT_DIR)
        except:
            logger.error("Could not create output directory. (%s)"%OUTPUT_DIR)
            return
    
    # Auto-generate our dcap filename
    log_dcap_filename = os.path.join(OUTPUT_DIR, "lophi_disk_"+datetime.datetime.now().strftime("%m-%d-%H:%M")+".dcap")

    print "* Initializing SATA sensor..."                
    
    # Initialize our disk sensor    
    if options.sensor_type == G.MACHINE_TYPES.PHYSICAL:
        disk_sensor = DiskSensorPhysical(G.SENSOR_DISK.DEFAULT_IP,
                                         bind_ip=default_dest_ip,
                                         name="SATA_Sensor")
        
        if not disk_sensor.is_up():
            logger.error("Disk sensor appears to be down.")
            return
    else:
        disk_sensor = DiskSensorVirtual(options.target)
        
    print "* Logging data to: %s" % log_dcap_filename

    print "* Setting up DCAP logger..."
    # Setup our dcap logger
    # We use a queue so that we don't hold up the socket.
    log_dcap_queue = multiprocessing.Queue()
    log_dcap_writer = CaptureWriter(log_dcap_filename,
                                    log_dcap_queue)
    log_dcap_writer.start()
        
    print "* Connecting to our sensor..."
    
    # Get data forever and report it back
    disk_sensor._connect()

    if options.sensor_type == G.MACHINE_TYPES.PHYSICAL:
        print "* Enabling SATA extraction..."
        disk_sensor.sata_enable_all()
        
        print "* Reading SATA Frame packets..."
        
    else:
        print "* Reading Disk Sensor Packets..."
    
    UPDATE_INTERVAL = 5 # Seconds
    last_print_time = 0
    while 1:
        try:
            # Get our packet
            # Returns a SATAFrame for physical and DiskSensorPacket for virtual.
            packet = disk_sensor.get_disk_packet()    

            # Log to the dcap queue
            if log_dcap_queue is not None:
                log_dcap_queue.put(packet)
                
            # Should we print something to screen?
            now = time.time()
            if now - last_print_time > UPDATE_INTERVAL:
                size = sizeof_fmt(os.path.getsize(log_dcap_filename))
                print "* Captured %s."%size
                last_print_time = now
                                
        except:
            logger.error("Problem getting disk packet.")
            G.print_traceback()
            break
            
    if log_dcap_queue is not None:
        log_dcap_writer.stop()
        
    if options.sensor_type == G.MACHINE_TYPES.PHYSICAL:
        disk_sensor.sata_disable()
    
    return
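
sizeof_fmt() is called above but not defined on this page; presumably it is the conventional human-readable byte formatter, sketched here:

def sizeof_fmt(num):
    """Format a byte count as a human-readable string."""
    num = float(num)
    for unit in ["B", "KB", "MB", "GB", "TB"]:
        if num < 1024.0:
            return "%3.1f %s" % (num, unit)
        num /= 1024.0
    return "%3.1f PB" % num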
Example No. 19
def main(options):
    """
        This script will connect to the LO-PHI Disk Sensor and log all of the 
        activity to a dcap file containing the RAW data capture
    """

    # Should we automatically set an output dir?
    OUTPUT_DIR = options.output_dir
    if OUTPUT_DIR is None:
        OUTPUT_DIR = "lophi_data_" + datetime.datetime.now().strftime("%m%d")

    # Make sure we can create the output directory
    if not os.path.exists(OUTPUT_DIR):
        try:
            os.makedirs(OUTPUT_DIR)
        except:
            logger.error("Could not create output directory. (%s)" %
                         OUTPUT_DIR)
            return

    # Auto-generate our dcap filename
    log_dcap_filename = os.path.join(
        OUTPUT_DIR, "lophi_disk_" +
        datetime.datetime.now().strftime("%m-%d-%H:%M") + ".dcap")

    print "* Initializing SATA sensor..."

    # Initialize our disk sensor
    if options.sensor_type == G.MACHINE_TYPES.PHYSICAL:
        disk_sensor = DiskSensorPhysical(G.SENSOR_DISK.DEFAULT_IP,
                                         bind_ip=default_dest_ip,
                                         name="SATA_Sensor")

        if not disk_sensor.is_up():
            logger.error("Disk sensor appears to be down.")
            return
    else:
        disk_sensor = DiskSensorVirtual(options.target)

    print "* Logging data to: %s" % log_dcap_filename

    print "* Setting up DCAP logger..."
    # Setup our dcap logger
    # We use a queue so that we don't hold up the socket.
    log_dcap_queue = multiprocessing.Queue()
    log_dcap_writer = CaptureWriter(log_dcap_filename, log_dcap_queue)
    log_dcap_writer.start()

    print "* Connecting to our sensor..."

    # Get data forever and report it back
    disk_sensor._connect()

    if options.sensor_type == G.MACHINE_TYPES.PHYSICAL:
        print "* Enabling SATA extraction..."
        disk_sensor.sata_enable_all()

        print "* Reading SATA Frame packets..."

    else:
        print "* Reading Disk Sensor Packets..."

    UPDATE_INTERVAL = 5  # Seconds
    last_print_time = 0
    while 1:
        try:
            # Get our packet
            # Returns a SATAFrame for physical and DiskSensorPacket for virtual.
            packet = disk_sensor.get_disk_packet()

            # Log to the dcap queue
            if log_dcap_queue is not None:
                log_dcap_queue.put(packet)

            # Should we print something to screen?
            now = time.time()
            if now - last_print_time > UPDATE_INTERVAL:
                size = sizeof_fmt(os.path.getsize(log_dcap_filename))
                print "* Captured %s." % size
                last_print_time = now

        except:
            logger.error("Problem getting disk packet.")
            G.print_traceback()
            break

    if log_dcap_queue is not None:
        log_dcap_writer.stop()

    if options.sensor_type == G.MACHINE_TYPES.PHYSICAL:
        disk_sensor.sata_disable()

    return
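
CaptureWriter is also not shown on this page. A hypothetical sketch consistent with how it is used above: a multiprocessing consumer that drains the queue into the dcap file, with stop() implemented as a None sentinel (the real class may differ):

import multiprocessing

class CaptureWriter(multiprocessing.Process):

    def __init__(self, filename, queue):
        multiprocessing.Process.__init__(self)
        self._filename = filename
        self._queue = queue

    def run(self):
        out = open(self._filename, "wb")
        try:
            while True:
                packet = self._queue.get()
                if packet is None:  # sentinel pushed by stop()
                    break
                out.write(str(packet))
        finally:
            out.close()

    def stop(self):
        self._queue.put(None)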
Example No. 20
    def run(self):
        """
            Figure out which type of host we are examining and call the 
            appropriate function.
        """

        from lophi_semanticgap.disk.sata import SATAInterpreter
        from lophi_semanticgap.disk.sata_reconstructor import SATAReconstructor
        from lophi_semanticgap.disk.filesystem_reconstructor import SemanticEngineDisk

        logger.debug("DiskEngine Started.")

        # Scan in our starting point
        if self.machine.type == G.MACHINE_TYPES.PHYSICAL:
            if not self._check_disk_scan():
                logger.error("Analysis cannot continue without a valid scan file.")
                return
            
            disk_img = self.machine.config.disk_scan
        else:
            # Get our image name for the virtual HDD on this host
            image_name = self.machine.disk_get_filename()
            logger.debug("Scanning disk image (%s)..." % (image_name))

            if image_name is None:
                logger.error("No disk found for VM (%s)."%self.machine.config.name)
                return
            
            if image_name.endswith("qcow2"):
                logger.warning("Got qcow2 image, scanning the base image, ensure that you reset the machine!")
                disk_img = self.machine.config.disk_base
            else:
                disk_img = image_name
                
        # Setup our tmp file
        self.working_disk_img = os.path.join(G.DIR_ROOT,G.DIR_TMP,self.machine.config.name+"-disk.img.tmp")
        
        # Create a backup
        logger.debug("Copying %s to %s..."%(disk_img, self.working_disk_img))
        cmd = "cp --sparse=always %s %s" % (disk_img, self.working_disk_img)
        subprocess.Popen(cmd, shell=True, stdout=subprocess.PIPE).stdout.read()
        os.chmod(self.working_disk_img, 0755)
        
        # Set up our semantic bridge
        logger.info("Parsing disk image %s into our semantic engine... (This may take a while)" % self.working_disk_img)
        semantic_engine = SemanticEngineDisk(self.working_disk_img)

        # SATA Interpreter
        sata = SATAInterpreter() 
        
        # SATA Reconstructor (handles NCQ reordering)
        sata_reconstructor = SATAReconstructor(sector_size=G.SENSOR_DISK.DEFAULT_SECTOR_SIZE)
        
        # Get data forever and report it back
        self.RUNNING  = True
        while self.RUNNING:
            
            # Accept commands
            try:
                cmd = self.command_queue.get(False).split(" ")
                logger.debug("Got cmd: %s" % cmd)
                if cmd[0] == G.CTRL_CMD_PAUSE:
                    logger.debug("Pausing analysis")
                    self.PAUSED = True
                    self.machine.disk._disconnect()
                if cmd[0] == G.CTRL_CMD_UNPAUSE:
                    logger.debug("Resuming Analysis")
                    self.PAUSED = False
                    self.machine.disk._connect()
                if cmd[0] == G.CTRL_CMD_KILL or cmd[0] == G.CTRL_CMD_STOP:
                    logger.debug("Got kill command")
                    self.RUNNING = False
                    self.machine.disk._disconnect()
                    break
            except:
                # Do nothing
                pass
            
            if self.PAUSED:
                time.sleep(1)
                continue
            
            # Get our packet
            try:
                data = self.machine.disk_get_packet()
                logger.debug("Got: %s"%data)
            except:
                G.print_traceback()
                logger.debug("Disk introspection socket closed.")
                break
            
            # Good data?
            if data is None:
                continue

            if self.machine.type == G.MACHINE_TYPES.PHYSICAL:
                lophi_packet = type('AnonClass', (object,), { "sata_header": None, "sata_data": None })                    
                (lophi_packet.sata_header, lophi_packet.sata_data) = sata.extract_sata_data(data)
                
                # deal with SATA NCQ reordering
                disk_sensor_pkts = sata_reconstructor.process_packet(lophi_packet)
            else:
                disk_sensor_pkts = [data]
                
            
            # Process all of our disk packets
            if disk_sensor_pkts:
                for dsp in disk_sensor_pkts:
                    # Skip empty packets
                    if not dsp:
                        continue
      
                    try:

                        fs_operations = semantic_engine.get_access(dsp.sector, 
                                                                   dsp.num_sectors, 
                                                                   dsp.disk_operation, 
                                                                   dsp.data)   
                        self.parse_actions(time.time(), fs_operations)                   
                               
                    except:
                        logging.exception("Encountered error while trying to bridge semantic gap for this disk access.")

            
#             logger.debug("got actions %s"%actions)
            # Handle our output
#             self.parse_actions(actions)

        logger.debug("Disk analysis exiting...")
Example No. 21
    def run(self):
        """" 
            Run analysis and then continuously read and process commands from 
            our command queue.    
        """

        logger.info("Started LO-PHI analysis '%s'. (PID: %d)" %
                    (self.NAME, os.getpid()))

        COMMANDS = {
            #  Command                      Function
            G.CTRL_CMD_PAUSE: self.analysis_pause,
            G.CTRL_CMD_UNPAUSE: self.analysis_resume,
            G.CTRL_CMD_STOP: self.analysis_stop
        }

        # grab a machine from the queue if one wasn't explicitly set
        if self.machine_name is not None:
            self.machine = self.machine_list[self.machine_name]

        if self.machine is None:
            logger.error("No machine provided to analysis.")
            return False

        self.machine.ALLOCATED = self.id

        # Start our analysis
        logger.debug("Acquiring mutex and starting analysis...")
        with self.machine.MUTEX:

            # Put ourselves in the running pool
            if self.running_dict is not None and \
               self.lophi_analysis_id is not None:
                # Moved from queued to running
                self.running_dict['queued'].remove(self.lophi_analysis_id)
                self.running_dict['running'].append(self.lophi_analysis_id)

            if self.lophi_command is not None and \
               self.lophi_command.db_analysis_id is not None:
                try:
                    DB_analysis = DatastoreAnalysis(self.services_host)
                    DB_analysis.update_analysis_machine(
                        self.lophi_command.db_analysis_id, self.machine)
                    DB_analysis.update_analysis(
                        self.lophi_command.db_analysis_id, "status",
                        G.JOB_RUNNING)
                    DB_analysis.update_analysis(
                        self.lophi_command.db_analysis_id, "started",
                        time.time())
                except:
                    logger.error(
                        "Could not update the database with analysis info.")

            # Run the user-defined analysis
            try:
                # Set our machine to the proper profile
                if self.lophi_command is not None:
                    logger.debug("Setting machine profile...")
                    if self.lophi_command.volatility_profile is not None:
                        prof_status = self.machine.set_volatility_profile(
                            self.lophi_command.volatility_profile)
                    elif self.VOLATILITY_PROFILE is not None:
                        prof_status = self.machine.set_volatility_profile(
                            self.VOLATILITY_PROFILE)
                    if not prof_status:
                        err = "Could not set profile (%s) for machine (%s)." % (
                            self.machine.config.volatility_profile,
                            self.machine.config.name)
                        logger.error(err)

                #
                #    Run the actual analysis
                #
                self.analysis_start()

                if self.lophi_command is not None and \
                   self.lophi_command.db_analysis_id is not None:
                    try:
                        DB_analysis.update_analysis(
                            self.lophi_command.db_analysis_id, "status",
                            G.JOB_DONE)
                    except:
                        logger.warn(
                            "Could not update the database with analysis info."
                        )

            except:
                logger.error("Analysis failed to start!")

                G.print_traceback()

                if self.lophi_command is not None and \
                   self.lophi_command.db_analysis_id is not None:
                    try:
                        DB_analysis.update_analysis(
                            self.lophi_command.db_analysis_id, "error",
                            G.get_traceback())
                        DB_analysis.update_analysis(
                            self.lophi_command.db_analysis_id, "status",
                            G.JOB_FAILED)
                    except:
                        logger.warn(
                            "Could not update the database with analysis info."
                        )

                self.CONTINUE_EXECUTION = False
                # Try to stop anything that may have started.
                try:
                    self.analysis_stop()
                except:
                    pass

            # Wait for output to start returning, and handle appropriately
            while False and self.CONTINUE_EXECUTION:

                logger.debug("Waiting for cmd")

                command = self.command_queue.get()

                logger.debug("Got: %s" % command)

                # Split up our command
                cmd = command.rstrip().split(" ")

                # See if it's valid command
                if cmd[0] not in COMMANDS.keys():
                    logger.error("Got invalid command: %s" % command)
                else:
                    logger.debug("Executing %s" % cmd[0])
                    try:
                        COMMANDS[cmd[0]](command)
                    except:
                        G.print_traceback()

                    if cmd[0] == G.CTRL_CMD_STOP or cmd[0] == G.CTRL_CMD_KILL:
                        break

        # Mark analysis completion time
        try:
            DB_analysis.update_analysis(self.lophi_command.db_analysis_id,
                                        "completed", time.time())
        except:
            logger.warn("Could not update the database with analysis info.")

        # Clean up and release machine
        logger.debug("Release machine back to queue.")
        self.machine.ALLOCATED = -1

        # remove from running analysis
        if self.running_dict is not None and self.lophi_analysis_id is not None:
            self.running_dict['running'].remove(self.lophi_analysis_id)

        # Did we get our machine from the queue? Put it back.
        if self.machine_queue is not None and self.machine_name is not None:
            self.machine_queue.put(self.machine_name)

        self.command_queue.close()
        logger.debug("LophiAnalysis stopped.")
        return True