Example #1
0
    def update_eta():
        """Recompute the human-readable ETA from hashcat's status and push it.

        Pulls the latest status dict from the running cracking process, bails
        out early when nothing changed, derives an ETA string from the rule
        type and hashcat's reported progress/speed, and sends it to the
        server unless it equals the previously sent value.
        """
        fresh = Cracker.crt_process.get_dict()

        # A missing cache always counts as a change; otherwise compare the
        # fresh values key by key (short-circuits on the first difference).
        if Cracker.eta_dict is None:
            changed = True
        else:
            changed = any(Cracker.eta_dict[key] != value
                          for key, value in fresh.items())

        # If no changes were made no updates are necessary
        if not changed:
            return

        Cracker.eta_dict = fresh
        # TODO This message is wrongly displayed right around when a hashcat process stops
        eta = "Error calculating ETA"

        rule = Cracker.crt_rule
        status = Cracker.eta_dict

        # TODO maskfile eta is not properly calculated: hashcat outputs the
        # TODO eta for the current queue only, and each mask has its own queue
        # TODO implement rule 5 with hashcat only
        if rule["type"] == "filemask_hashcat" or rule["wordsize"] <= 0:
            eta = "No ETA available"
        elif status["progress"] == -1 and status["eta"] == "":
            eta = "Cracking process starting"
        elif status["eta"] not in ("", "(0 secs)"):
            eta = status["eta"]
        elif status["speed"] != -1 and status["progress"] != -1:
            # For rules generated at runtime with a variable base dictionary
            # length the ETA cannot be calculated
            speed = status["speed"]
            if speed != 0:
                if rule["wordsize"] < status["progress"]:
                    Comunicator.error_logger(
                        "Dict size (%d) seems less than current attacked (%d)"
                        % (rule["wordsize"], status["progress"]))

                remaining = rule["wordsize"] - status["progress"]
                eta = Cracker.seconds_to_time(remaining / speed)
            else:
                eta = "Generating dict..."

        # Skip the network round trip when the value did not change.
        # Usually happens when 'Cracker.crt_rule["wordsize"] <= 0'
        if Cracker.old_eta == eta:
            return

        Cracker.old_eta = eta

        try:
            Cracker.req.sendeta(eta)
        except Cracker.req.ServerDown:
            pass
Example #2
0
    def _force_cleanup(self):
        """Best-effort teardown of the child process, its pipes and threads.

        Interrupts the process if it is still alive, closes the write ends so
        the reader threads terminate, joins the helper threads and releases
        the remaining descriptors. Missing attributes are logged, not raised.
        """
        try:
            # Interrupt the child only if it is still running
            if self.proc and self.proc.poll() is None:
                self.interrupt(self.proc, self.cmd)

            # Signal ___hashcat_writer_thread to stop as soon as possible
            # (takes a moment because of its sleep(1))
            self.stop_in_thread = True

            # Closing the write ends makes the matching reader threads exit
            self.err_w = SingleProcess._close_helper(self.err_w)[0]  # stops err_reader_thread
            self.out_w = SingleProcess._close_helper(self.out_w)[0]  # stops out_reader_thread

            # Join threads that are still running; stopping them also closes
            # their respective pipe ends
            self.in_writer_thread = SingleProcess._join_helper(self.in_writer_thread)[0]
            self.err_reader_thread = SingleProcess._join_helper(self.err_reader_thread)[0]
            self.out_reader_thread = SingleProcess._join_helper(self.out_reader_thread)[0]

            # The reading/writing ends of these pipes were closed by the threads
            self.err_r = self.out_r = self.in_w = None

            # Only the reading end of the stdin pipe is left to close
            self.in_r = SingleProcess._close_helper(self.in_r)[0]

        except AttributeError as e:
            # Attributes may be absent if setup never completed; log and carry on
            Comunicator.error_logger("Attribute error raised %s" % e)
Example #3
0
    def _force_cleanup(self):
        """Best-effort teardown of both child processes, pipes and threads.

        Interrupts either process that is still alive, closes the write ends
        so the reader threads terminate, shuts the inter-process pipe, then
        joins the helper threads. Missing attributes are logged, not raised.
        """
        try:
            # Interrupt each child only if it is still running
            if self.fst_proc and self.fst_proc.poll() is None:
                self.interrupt(self.fst_proc, self.fst_cmd)

            if self.snd_proc and self.snd_proc.poll() is None:
                self.interrupt(self.snd_proc, self.snd_cmd)

            # Closing the write ends makes the matching reader threads exit
            self.fst_err_w = DoubleProcess._close_helper(self.fst_err_w)[0]  # stops fst_err_reader_thread
            self.snd_err_w = DoubleProcess._close_helper(self.snd_err_w)[0]  # stops snd_err_reader_thread
            self.snd_out_w = DoubleProcess._close_helper(self.snd_out_w)[0]  # stops snd_out_reader_thread

            # Shut both ends of the inter-process pipe if not already gone
            self.comm_r = DoubleProcess._close_helper(self.comm_r)[0]
            self.comm_w = DoubleProcess._close_helper(self.comm_w)[0]

            # Join threads that are still running; stopping them also closes
            # their respective pipe ends
            self.fst_err_reader_thread = DoubleProcess._join_helper(self.fst_err_reader_thread)[0]
            self.snd_err_reader_thread = DoubleProcess._join_helper(self.snd_err_reader_thread)[0]
            self.snd_out_reader_thread = DoubleProcess._join_helper(self.snd_out_reader_thread)[0]

            # The reading ends of these pipes were closed by the threads
            self.snd_err_r = self.snd_out_r = self.fst_err_r = None

        except AttributeError as e:
            # Attributes may be absent if setup never completed; log and carry on
            Comunicator.error_logger("Attribute error raised %s" % e)
Example #4
0
File: requester.py  Project: vladtp/psknow
    def stopwork(self, suppress_stdout=False):
        """Ask the backend to stop the current job.

        :param suppress_stdout: if True, report errors through the logger
            instead of the error printer
        :return:
            True - An error occurred
            None - Current job stopped
        :raises Requester.ServerDown: The server could not be reached
        """
        url = Configuration.remote_server + "stopwork"
        Comunicator.info_logger("Stopping work from '%s'" % url)

        response = None
        try:
            response = requests.post(
                url, data={"apikey": self.apikey}, timeout=10)
        except requests.exceptions.ConnectionError:
            raise Requester.ServerDown
        except requests.exceptions.Timeout:
            # fatal_regular_message is expected not to return
            Comunicator.fatal_regular_message("Backend is unresponsive")

        # A 502 means the backend proxy is up but the server itself is not
        if response.status_code == 502:
            raise Requester.ServerDown

        _, err = Requester._decode_json(response)
        if err == "":
            return None

        msg = "Error stopping work '%s'" % err
        if suppress_stdout:
            Comunicator.error_logger(msg)
        else:
            self.err_printer(msg)
        return True
Example #5
0
    def _reap_fst(self, now=False):
        """Reap the first child process once it has exited.

        Closes the write ends of its pipes, joins its stderr reader thread
        and collects its stderr output. If the process is marked critical and
        exited non-zero, performs a full cleanup and aborts.

        :param now: if True, block until the first process terminates
        :return: True if the process has been (or already was) reaped,
                 False if it is still running
        """
        if self.fst_reaped:
            return True

        if now and self.fst_proc.poll() is None:
            # Wait for the first process to stop executing
            self.fst_proc.wait()

        if self.fst_proc.poll() is not None:
            # The first process stopped executing, close it's write pipes
            self.fst_err_w.close()
            self.fst_err_w = None

            self.comm_w.close()
            self.comm_w = None

            # After we closed the writing end of the err pipe _all_reader_thread should stop
            self.fst_err_reader_thread.join()
            self.fst_err_reader_thread = None

            # Convert error from list to string
            self.fst_err = "".join(self.fst_err)

            # Mark the first process as completely stopped
            self.fst_reaped = True

            # TODO this can be generic. If this becomes static the poll needs to be checked against None
            if self.critical and self.fst_proc.poll() != 0:
                Comunicator.error_logger(
                    "First process %s exited with status %d. Stderr:\n%s" %
                    (self.fst_cmd, self.fst_proc.poll(), self.fst_err))
                self._force_cleanup()
                # fatal_debug_printer is expected not to return
                Comunicator.fatal_debug_printer(
                    "Fatal error encountered in critical first process. See logs."
                )

            return True
        return False