Example #1
    def iterate_minibatches(self,
                            generate_jobs,
                            batch_size,
                            buffer_size,
                            postprocess_batch=None):
        assert buffer_size >= batch_size
        buffer_semaphore = BoundedSemaphore(
            buffer_size)  # jobs currently in buffer
        generate_jobs = iter(generate_jobs)
        postprocess_batch = postprocess_batch or nop

        @background(max_prefetch=-1)
        def _load_jobs_and_iterate_batch_sizes():
            """
            Loads jobs into queue, yields batch_size every time a generator
            can order this batch_size from the database
            """
            current_batch = 0
            for task in generate_jobs:
                buffer_semaphore.acquire()
                self.add_jobs(task)
                current_batch += 1
                if current_batch == batch_size:
                    yield current_batch  # you can now load additional batch_size elements
                    current_batch = 0
            if current_batch != 0:
                yield current_batch  # load the remaining elements

        for allowed_batch_size in _load_jobs_and_iterate_batch_sizes():
            batch = self.get_results_batch(allowed_batch_size)
            for _ in batch:
                buffer_semaphore.release()

            yield postprocess_batch(batch)
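The snippet above assumes a `background` prefetch decorator and a `nop` helper defined elsewhere; minimal sketches of both (illustrative only, not the original implementations):

from queue import Queue
from threading import Thread

def nop(batch):
    """Default postprocessor: return the batch unchanged."""
    return batch

def background(max_prefetch=1):
    """Run a generator in a daemon thread, prefetching up to max_prefetch
    items (-1 means an unbounded prefetch queue)."""
    def decorator(generator_fn):
        def wrapper(*args, **kwargs):
            queue = Queue(maxsize=max(max_prefetch, 0))  # maxsize=0 is unbounded
            sentinel = object()

            def producer():
                for item in generator_fn(*args, **kwargs):
                    queue.put(item)
                queue.put(sentinel)  # signal exhaustion

            Thread(target=producer, daemon=True).start()
            while True:
                item = queue.get()
                if item is sentinel:
                    return
                yield item
        return wrapper
    return decorator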
Example #2
class BoundedExecutor:
    """BoundedExecutor behaves as a ThreadPoolExecutor which will block on
    calls to submit() once the limit given as "bound" work items are queued for
    execution.
    :param bound: Integer - the maximum number of items in the work queue
    :param max_workers: Integer - the size of the thread pool
    """
    def __init__(self, bound, max_workers):
        self.executor = ThreadPoolExecutor(max_workers=max_workers)
        self.semaphore = BoundedSemaphore(bound + max_workers)
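        # bound + max_workers permits: up to `bound` items may wait in the
        # executor's queue while up to `max_workers` items are executing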

    """See concurrent.futures.Executor#submit"""

    def submit(self, fn, *args, **kwargs):
        self.semaphore.acquire()
        try:
            future = self.executor.submit(fn, *args, **kwargs)
        except:
            self.semaphore.release()
            raise
        else:
            future.add_done_callback(lambda x: self.semaphore.release())
            return future

    """See concurrent.futures.Executor#shutdown"""

    def shutdown(self, wait=True):
        self.executor.shutdown(wait)
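A brief usage sketch (urls and fetch are illustrative, not part of the original):

import urllib.request

def fetch(url):
    with urllib.request.urlopen(url) as response:
        return response.read()

urls = ['https://example.com/page/%d' % i for i in range(100)]
executor = BoundedExecutor(bound=10, max_workers=4)
futures = [executor.submit(fetch, url) for url in urls]  # blocks once 14 are pending
results = [f.result() for f in futures]
executor.shutdown()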
Example #3
    def _add_token_loop(self, time_delta):
        """Add a token every time_delta seconds."""
        while True:
            try:
                BoundedSemaphore.release(self)
            except ValueError:  # ignore if already at the maximum value
                pass
            sleep(time_delta)  # ignore EINTR
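This fragment reads like the token-refill loop of a rate-limiting BoundedSemaphore subclass. A minimal sketch of how it might be wired up (the class name and constructor are assumptions, not from the source):

from threading import BoundedSemaphore, Thread
from time import sleep

class RatedSemaphore(BoundedSemaphore):
    """Allow roughly `value` acquisitions per `period` seconds."""

    def __init__(self, value=1, period=1):
        super().__init__(value)
        refill = Thread(target=self._add_token_loop,
                        args=(float(period) / value,))
        refill.daemon = True  # don't keep the process alive on exit
        refill.start()

    def _add_token_loop(self, time_delta):
        """Add a token every time_delta seconds."""
        while True:
            try:
                BoundedSemaphore.release(self)
            except ValueError:  # bucket already full
                pass
            sleep(time_delta)

rate = RatedSemaphore(2, 1)  # at most ~2 operations per second
rate.acquire()               # blocks until a token is available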
Example #4
class RequestManager:
    def __init__(self, max_workers):
        self.max_workers = max_workers
        self.lock = Lock()
        self.sem = BoundedSemaphore(max_workers)
        self.last_request = Value('d', 0.0)
        self.last_restricted_request = Value('d', 0.0)

    @contextmanager
    def normal_request(self):
        with self.lock:
            self.sem.acquire()
        time.sleep(
            max(
                0.0, self.last_restricted_request.value + 0.6 +
                (random.random() * 0.15) - time.time()))
        try:
            yield
        finally:
            self.last_request.value = time.time()
            self.sem.release()

    @contextmanager
    def restricted_request(self):
        with self.lock:
            for i in range(self.max_workers):
                self.sem.acquire()
        time.sleep(
            max(
                0.0, self.last_request.value + 0.6 + (random.random() * 0.15) -
                time.time()))
        try:
            yield
        finally:
            self.last_request.value = time.time()
            self.last_restricted_request.value = time.time()
            for i in range(self.max_workers):
                self.sem.release()
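A hypothetical usage sketch (do_normal_call and do_restricted_call are placeholder functions, not from the source):

manager = RequestManager(max_workers=4)

def worker():
    # up to max_workers threads may run normal requests concurrently,
    # each delayed ~0.6-0.75s after the last restricted request
    with manager.normal_request():
        do_normal_call()

def admin():
    # acquires all max_workers permits, so the restricted request runs alone
    with manager.restricted_request():
        do_restricted_call()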
Example #5
    def process(self, input_filepath, msi_loci, config):
        self.__reset()

        # Generate dictionary read counts for loci
        counts = {}
        loci = []
        for locus in msi_loci:
            counts[locus.locus()] = {}
            loci.append(locus)

        # Generate input and output queues and (mutex) semaphores
        # for each.
        queue_out = Queue()
        queue_in = Queue()
        queue_full = BoundedSemaphore(100)
        full = Semaphore(0)
        empty = BoundedSemaphore(40)
        mutex_out = Semaphore(1)
        mutex_in = Semaphore(1)

        # Set amount of consumer threads; minimum one.
        consumer_threads = config['threads'] - 1
        if consumer_threads < 1:
            consumer_threads = 1

        # Create producer process; currently only using a single process
        # since I/O is more of a limiter than CPU-bound work.
        self.__producer = Process(target=self.extract_reads,
                                  args=(input_filepath, msi_loci, full, empty,
                                        mutex_out, queue_in, consumer_threads))
        self.__producer.start()

        # Spawn the set amount of threads/processes
        if self.debug_output:
            tprint('Main> Generating {0} analyzer process(es).'.format(
                consumer_threads))
        for i in range(0, consumer_threads):
            p = Process(target=self.read_analyzer,
                        args=(queue_in, queue_out, full, empty, mutex_in,
                              mutex_out, queue_full))
            self.__consumers.append(p)
            self.__consumers[-1].start()

        # Iterate through the loci, fetching any reads and pushing them to
        # the pool of threads, collecting the output as they process it.
        query_delay = 0.050  # In seconds

        loop_counter = 0
        proc_check_interval = 100
        while (not queue_out.empty() or self.has_live_threads()):
            # Sleep for the set amount of time so the queue isn't constantly
            # getting hammered with queries
            time.sleep(query_delay)
            loop_counter += 1
            if loop_counter % proc_check_interval == 0:
                # Time to check that the consumers
                # didn't die while the producer is still producing
                mutex_out.acquire()
                self.status_check(queue_out.qsize())
                mutex_out.release()

            while not queue_out.empty():
                # There is data on the queue to be processed;
                # the return from the queue should be a tuple
                # with (locus, repeat_count)
                mutex_out.acquire()
                result = queue_out.get()
                locus = result[0]
                repeat_count = result[1]
                if repeat_count >= 0:
                    if locus not in counts:
                        counts[locus] = {}
                    if repeat_count not in counts[locus]:
                        counts[locus][repeat_count] = 0
                    counts[locus][repeat_count] += 1
                mutex_out.release()
                queue_full.release()

            if not self.has_live_threads():
                # All processes should have terminated.
                if self.debug_output:
                    tprint('Main> All processes complete.')
                break
        # end while loop

        return counts
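The full/empty pair above is the classic bounded-buffer handshake: empty counts free slots, full counts buffered items. A minimal self-contained sketch of the same pattern (names are illustrative):

from multiprocessing import BoundedSemaphore, Process, Queue, Semaphore

def producer(queue_in, full, empty):
    for item in range(100):
        empty.acquire()   # wait for a free slot (at most 40 items in flight)
        queue_in.put(item)
        full.release()    # signal that one item is available
    empty.acquire()
    queue_in.put(None)    # sentinel: tell the consumer to stop
    full.release()

def consumer(queue_in, full, empty):
    while True:
        full.acquire()    # wait until an item is available
        item = queue_in.get()
        empty.release()   # free the slot
        if item is None:
            break

if __name__ == '__main__':
    queue_in = Queue()
    full = Semaphore(0)           # number of items currently buffered
    empty = BoundedSemaphore(40)  # number of free slots, as in the example
    p = Process(target=producer, args=(queue_in, full, empty))
    c = Process(target=consumer, args=(queue_in, full, empty))
    p.start()
    c.start()
    p.join()
    c.join()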
Example #6
from multiprocessing import BoundedSemaphore

if __name__ == '__main__':
    bsem = BoundedSemaphore(3)

    bsem.acquire()
    bsem.release()
    bsem.release()  # no exception is raised on macOS
"""
Traceback (most recent call last):
  File "D:/code/picpython/pp_046_多进程同步之BoundedSemaphore.py", line 8, in <module>
    bsem.release()
ValueError: semaphore or lock released too many times
"""
Example #8
class Zydra():
    def __init__(self):
        self.start_time = time.monotonic()
        self.process_lock = BoundedSemaphore(value=cpu_count())
        self.counter_lock = threading.BoundedSemaphore(value=1)
        self.banner()
        self.stop = Queue(maxsize=1)
        self.stop.put(False)
        self.count = Queue(maxsize=1)
        self.threads = []
        self.name = Queue(maxsize=1)
        self.name.put(str("a"))
        self.process_count = 0
        self.limit_process = 500
        self.shot = 5000

    def fun(self, string):
        letters = list(string)
        spinner = ['-', "\\", "|", '/']
        timer = 0
        pointer = 0
        spinner_pointer = 0
        while timer < 20:
            letters[pointer] = letters[pointer].upper()
            print("\r" +
                  self.blue("".join(letters) + " " + spinner[spinner_pointer]),
                  end="")
            letters[pointer] = letters[pointer].lower()
            if spinner_pointer == len(spinner) - 1:
                spinner_pointer = -1
            if pointer == len(letters) - 1:
                pointer = -1
            pointer += 1
            spinner_pointer += 1
            timer += 1
            time.sleep(0.1)
            if timer == 20:
                print("\r" + self.blue(string) + "\n", end="")
                return

    def blue(self, string):
        return colored(string, "blue", attrs=['bold'])

    def green(self, string):
        return colored(string, "green", attrs=['bold'])

    def yellow(self, string):
        return colored(string, "yellow", attrs=['bold'])

    def red(self, string):
        return colored(string, "red", attrs=['bold'])

    def bwhite(self, string):
        return colored(string, "white", attrs=['bold'])

    def white(self, string):
        return colored(string, "white")

    def detect_file_type(self, file):
        extension = str(file).split(".")[-1]
        if extension in ("rar", "zip", "pdf"):
            return extension
        return "text"

    def count_word(self, dict_file):
        count = 0
        with open(dict_file, "r") as wordlist:
            for line in wordlist:
                count += 1
        return count

    def count_possible_com(self, chars, min, max):
        x = min
        possible_com = 0
        while x <= max:
            possible_com += len(chars)**x
            x += 1
        return possible_com
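    # Example: with the 26 lowercase letters, min=1, max=3 this yields
    # 26 + 26**2 + 26**3 = 26 + 676 + 17576 = 18278 possible passwords.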

    def counter(self, max_words):
        self.counter_lock.acquire()
        num = self.count.get()
        # print(self.count)
        # print(num)
        if num != 0:
            self.count.put(num - 1)
            current_word = max_words - int(num) + 1
            percent = (100 * current_word) / max_words
            width = (current_word + 1) / (max_words / 42)  # scale to a 42-char bar
            bar = "\t" + self.white("Progress : [") + "#" * int(width) + " " * (42 - int(width)) \
                  + "] " + self.yellow(str("%.3f" % percent) + " %")
            # time.sleep(1)
            sys.stdout.write(u"\t\u001b[1000D" + bar)
            sys.stdout.flush()
        self.counter_lock.release()

    def handling_too_many_open_files_error(self):
        if self.process_count == self.limit_process:
            for x in self.threads:
                x.join()
            self.threads = []
            self.limit_process += 500

    def search_zip_pass(self, passwords_list, compress_file, max_words):
        try:
            temp_file = self.create_temporary_copy(compress_file,
                                                   passwords_list[1])
            for word in passwords_list:
                password = word.strip('\r').strip('\n')
                stop = self.stop.get()
                self.stop.put(stop)
                if stop is False:  # keep going only while no password has been found
                    self.counter(max_words)
                    try:
                        with zipfile.ZipFile(temp_file, "r") as zfile:
                            zfile.extractall(
                                pwd=bytes(password, encoding='utf-8'))
                            self.stop.get()
                            self.stop.put(True)
                            time.sleep(3)
                            print("\n\t" + self.green("[+] Password Found: " +
                                                      password) + "\n")
                            break
                    except Exception as e:
                        # print(e)
                        pass
                else:
                    break
            if os.path.isfile(temp_file):
                os.remove(os.path.abspath(temp_file))
            # last_process_number = int(max_words / self.shot) + (max_words % self.shot > 0)
            if str(self.last_process_number) in str(current_process().name):
                time.sleep(20)
                stop = self.stop.get()
                self.stop.put(stop)
                if stop is False:
                    print("\n\t" + self.red("[-] password not found") + "\n")
                else:
                    pass
            self.process_lock.release()
        except KeyboardInterrupt:
            self.process_lock.release()

    def create_temporary_copy(self, file, word):
        name = self.name.get()
        name2 = str(word)
        self.name.put(name2)
        directory_path = "temp_directory"
        try:
            os.mkdir(directory_path)
        except FileExistsError:
            pass
        temp_file_name = "temp" + name + "." + self.file_type
        temp_file_path = directory_path + '/' + temp_file_name  # linux path
        shutil.copy2(file, temp_file_path)
        return temp_file_path

    def delete_temporary_directory(self):
        if os.path.exists("temp_directory"):
            shutil.rmtree("temp_directory")

    def search_rar_pass(self, passwords_list, compress_file, max_words):
        try:
            temp_file = self.create_temporary_copy(compress_file,
                                                   passwords_list[1])
            for word in passwords_list:
                password = word.strip('\r').strip('\n')
                stop = self.stop.get()
                self.stop.put(stop)
                if stop is False:  # keep going only while no password has been found
                    self.counter(max_words)
                    try:
                        with rarfile.RarFile(temp_file, "r") as rfile:
                            # print(password)  # very useful for troubleshooting
                            rfile.extractall(pwd=password)
                            self.stop.get()
                            self.stop.put(True)
                            time.sleep(3)
                            print("\n\t" + self.green("[+] Password Found: " +
                                                      password + '\n'))
                            break
                    except Exception as e:
                        # print(e)
                        pass
                else:
                    break
            if os.path.isfile(temp_file):
                os.remove(os.path.abspath(temp_file))
            # last_process_number = int(max_words / 500) + (max_words % 500 > 0)
            if str(self.last_process_number) in str(current_process().name):
                time.sleep(20)
                stop = self.stop.get()
                self.stop.put(stop)
                if stop is False:
                    print("\n\t" + self.red("[-] password not found") + "\n")
                else:
                    pass
            self.process_lock.release()
        except KeyboardInterrupt:
            self.process_lock.release()

    def search_pdf_pass(self, passwords_list, file, max_words):
        try:
            temp_file = self.create_temporary_copy(file, passwords_list[1])
            for word in passwords_list:
                password = word.strip('\r').strip('\n')
                stop = self.stop.get()
                self.stop.put(stop)
                if stop is False:  # keep going only while no password has been found
                    self.counter(max_words)
                    # (Reconstructed: the password argument was redacted in
                    # the source; qpdf exits 0 on success, 2 on a bad password.)
                    proc = subprocess.Popen(
                        ['qpdf', "--password=" + password, "--decrypt",
                         temp_file, self.decrypted_file_name],
                        stderr=subprocess.PIPE)
                    status = proc.wait()
                    if status == 0:
                        self.stop.get()
                        self.stop.put(True)
                        time.sleep(3)
                        print("\n\t" +
                              self.green("[+] Password Found: " + password))
                        print("\t" + self.blue("[*]") +
                              self.white(" Your decrypted file is ") +
                              self.bwhite(self.decrypted_file_name) + "\n")
                        # self.end_time()
                        break
                    elif status == 2:
                        pass
                else:
                    break
            # for thread in self.threads:
            #     print(thread)
            # last_process_number = int(max_words / 500) + (max_words % 500 > 0)
            if str(self.last_process_number) in str(current_process().name):
                time.sleep(20)
                stop = self.stop.get()
                self.stop.put(stop)
                if stop is False:
                    print("\n\t" + self.red("[-] password not found") + "\n")
                else:
                    pass
            self.process_lock.release()
        except KeyboardInterrupt:
            self.process_lock.release()

    def search_shadow_pass(self, passwords_list, salt_for_crypt, crypt_pass,
                           max_words, user):
        try:
            for word in passwords_list:
                password = word.strip('\r').strip('\n')
                stop = self.stop.get()
                self.stop.put(stop)
                if stop is False:  # keep going only while no password has been found
                    self.counter(max_words)
                    cryptword = crypt.crypt(password, salt_for_crypt)
                    if cryptword == crypt_pass:
                        self.stop.get()
                        self.stop.put(True)
                        time.sleep(4)
                        print("\n\t" +
                              self.green("[+] Password Found: " + password) +
                              "\n")
                        break
                    else:
                        pass
                else:
                    break
            # print(last_process_number)
            # print(str(current_process().name))
            if str(self.last_process_number) in str(current_process().name):
                time.sleep(20)
                stop = self.stop.get()
                self.stop.put(stop)
                if stop is False:
                    print("\n\t" + self.red("[-] password not found") + "\n")
                else:
                    pass
            self.process_lock.release()
        except KeyboardInterrupt:
            self.process_lock.release()

    def last_words_check(self, max_words, passwords_list, file):
        while True:
            if self.stop is True:
                exit(0)
            elif self.count == len(passwords_list):  # self.count decreases
                if self.file_type == "rar":
                    self.search_rar_pass(passwords_list, file, max_words)
                if self.stop is False:
                    print("\n\t" + self.red("[-] Password not found") + "\n")
                    self.delete_temporary_directory()
                    self.end_time()
                return
            else:
                pass

    def dict_guess_password(self, dict_file, file):
        last_check = 0
        passwords_group = []
        possible_words = self.count_word(dict_file)
        self.last_process_number = int(
            possible_words / self.shot) + (possible_words % self.shot > 0)
        self.count.put(possible_words)
        self.file_type = self.detect_file_type(file)
        self.fun("Starting password cracking for " + file)
        print("\n " + self.blue("[*]") +
              self.white(" Count of possible passwords: ") +
              self.bwhite(str(possible_words)))
        if self.file_type == "text":
            file = open(file)
            for line in file.readlines():
                self.count.get()
                self.count.put(possible_words)
                crypt_pass = line.split(':')[1].strip(' ')
                if crypt_pass not in ['*', '!', '!!']:
                    user = line.split(':')[0]
                    print("  " + self.blue("[**]") +
                          self.white(" cracking Password for: ") +
                          self.bwhite(user))
                    algorithm = crypt_pass.split('$')[1].strip(' ')
                    salt = crypt_pass.split('$')[2].strip(' ')
                    salt_for_crypt = '$' + algorithm + '$' + salt + '$'
                    with open(dict_file, "r") as wordlist:
                        for word in wordlist:
                            passwords_group.append(word)
                            last_check += 1
                            self.handling_too_many_open_files_error()
                            if (len(passwords_group)
                                    == self.shot) or (possible_words -
                                                      last_check == 0):
                                passwords = passwords_group
                                passwords_group = []
                                self.process_lock.acquire()
                                stop = self.stop.get()
                                self.stop.put(stop)
                                if stop is False:
                                    t = Process(target=self.search_shadow_pass,
                                                args=(passwords,
                                                      salt_for_crypt,
                                                      crypt_pass,
                                                      possible_words, user))
                                    self.threads.append(t)
                                    self.process_count += 1
                                    t.start()
                                else:
                                    self.process_lock.release()
                            else:
                                continue
                        for x in self.threads:
                            x.join()
                        self.last_process_number *= 2
            self.end_time()
        elif self.file_type == "zip":
            with open(dict_file, "r") as wordlist:
                for word in wordlist:
                    passwords_group.append(word)
                    last_check += 1
                    self.handling_too_many_open_files_error()
                    if (len(passwords_group)
                            == self.shot) or (possible_words - last_check
                                              == 0):
                        passwords = passwords_group
                        passwords_group = []
                        self.process_lock.acquire()
                        stop = self.stop.get()
                        self.stop.put(stop)
                        if stop is False:
                            t = Process(target=self.search_zip_pass,
                                        args=(passwords, file, possible_words))
                            self.threads.append(t)
                            self.process_count += 1
                            t.start()
                        else:
                            self.process_lock.release()
                    else:
                        continue
                for x in self.threads:
                    x.join()
                self.delete_temporary_directory()
                self.end_time()
        elif self.file_type == "pdf":
            self.decrypted_file_name = "decrypted_" + file.split('/')[-1]
            with open(dict_file, "r") as wordlist:
                for word in wordlist:
                    passwords_group.append(word)
                    last_check += 1
                    self.handling_too_many_open_files_error()
                    if (len(passwords_group)
                            == self.shot) or (possible_words - last_check
                                              == 0):
                        passwords = passwords_group
                        passwords_group = []
                        self.process_lock.acquire()
                        stop = self.stop.get()
                        self.stop.put(stop)
                        if stop is False:
                            t = Process(target=self.search_pdf_pass,
                                        args=(passwords, file, possible_words))
                            self.threads.append(t)
                            self.process_count += 1
                            t.start()
                        else:
                            self.process_lock.release()
                    else:
                        continue
                for x in self.threads:
                    x.join()
                self.delete_temporary_directory()
                self.end_time()
        elif self.file_type == "rar":
            with open(dict_file, "r") as wordlist:
                for word in wordlist:
                    passwords_group.append(word)
                    last_check += 1
                    self.handling_too_many_open_files_error()
                    if (len(passwords_group)
                            == self.shot) or (possible_words - last_check
                                              == 0):
                        passwords = passwords_group
                        passwords_group = []
                        self.process_lock.acquire()
                        stop = self.stop.get()
                        self.stop.put(stop)
                        if stop is False:  # once a password is found, stop spawning new processes
                            t = Process(target=self.search_rar_pass,
                                        args=(passwords, file, possible_words))
                            self.threads.append(t)
                            self.process_count += 1
                            t.start()
                        else:
                            self.process_lock.release()
                    else:
                        continue
                for x in self.threads:
                    x.join()
                self.delete_temporary_directory()
                self.end_time()

    def bruteforce_guess_password(self, chars, min, max, file):
        last_check = 0
        passwords_group = []
        possible_com = self.count_possible_com(chars, int(min), int(max))
        self.last_process_number = int(
            possible_com / self.shot) + (possible_com % self.shot > 0)
        self.count.put(possible_com)
        self.file_type = self.detect_file_type(file)
        self.fun("Starting password cracking for " + file)
        print("\n " + self.blue("[*]") +
              self.white(" Count of possible passwords: ") +
              self.bwhite(str(possible_com)))
        if self.file_type == "text":
            file = open(file)
            for line in file.readlines():
                self.count.get()
                self.count.put(possible_com)
                crypt_pass = line.split(':')[1].strip(' ')
                if crypt_pass not in ['*', '!', '!!']:
                    user = line.split(':')[0]
                    print("  " + self.blue("[**]") +
                          self.white(" cracking Password for: ") +
                          self.bwhite(user))
                    algorithm = crypt_pass.split('$')[1].strip(' ')
                    salt = crypt_pass.split('$')[2].strip(' ')
                    salt_for_crypt = '$' + algorithm + '$' + salt + '$'
                    for password_length in range(int(min), int(max) + 1):
                        for guess in itertools.product(chars,
                                                       repeat=password_length):
                            guess = ''.join(guess)
                            passwords_group.append(guess)
                            last_check += 1
                            self.handling_too_many_open_files_error()
                            if (len(passwords_group)
                                    == self.shot) or (possible_com - last_check
                                                      == 0):
                                passwords = passwords_group
                                passwords_group = []
                                self.process_lock.acquire()
                                stop = self.stop.get()
                                self.stop.put(stop)
                                if stop is False:
                                    t = Process(
                                        target=self.search_shadow_pass,
                                        args=(passwords, salt_for_crypt,
                                              crypt_pass, possible_com, user))
                                    self.threads.append(t)
                                    self.process_count += 1
                                    t.start()
                                else:
                                    self.process_lock.release()
                            else:
                                continue
                    for x in self.threads:
                        x.join()
                    self.last_process_number *= 2
            self.end_time()
        elif self.file_type == "zip":
            for password_length in range(int(min), int(max) + 1):
                for guess in itertools.product(chars, repeat=password_length):
                    guess = ''.join(guess)
                    passwords_group.append(guess)
                    last_check += 1
                    self.handling_too_many_open_files_error()
                    if (len(passwords_group)
                            == self.shot) or (possible_com - last_check == 0):
                        passwords = passwords_group
                        passwords_group = []
                        self.process_lock.acquire()
                        stop = self.stop.get()
                        self.stop.put(stop)
                        if stop is False:
                            t = Process(target=self.search_zip_pass,
                                        args=(passwords, file, possible_com))
                            self.threads.append(t)
                            self.process_count += 1
                            t.start()
                        else:
                            self.process_lock.release()
                    else:
                        continue
            for x in self.threads:
                x.join()
            self.delete_temporary_directory()
            self.end_time()
        elif self.file_type == "pdf":
            self.decrypted_file_name = "decrypted_" + file.split('/')[-1]
            for password_length in range(int(min), int(max) + 1):
                for guess in itertools.product(chars, repeat=password_length):
                    guess = ''.join(guess)
                    passwords_group.append(guess)
                    last_check += 1
                    self.handling_too_many_open_files_error()
                    if (len(passwords_group)
                            == self.shot) or (possible_com - last_check == 0):
                        passwords = passwords_group
                        passwords_group = []
                        self.process_lock.acquire()
                        stop = self.stop.get()
                        self.stop.put(stop)
                        if stop is False:
                            t = Process(target=self.search_pdf_pass,
                                        args=(passwords, file, possible_com))
                            self.threads.append(t)
                            self.process_count += 1
                            t.start()
                        else:
                            self.process_lock.release()
                    else:
                        continue
            for x in self.threads:
                x.join()
            self.delete_temporary_directory()
            self.end_time()
        elif self.file_type == "rar":
            for password_length in range(int(min), int(max) + 1):
                for guess in itertools.product(chars, repeat=password_length):
                    guess = ''.join(guess)
                    passwords_group.append(guess)
                    last_check += 1
                    self.handling_too_many_open_files_error()
                    if (len(passwords_group)
                            == self.shot) or (possible_com - last_check == 0):
                        passwords = passwords_group
                        passwords_group = []
                        self.process_lock.acquire()
                        stop = self.stop.get()
                        self.stop.put(stop)
                        if stop is False:  # once a password is found, stop spawning new processes
                            t = Process(target=self.search_rar_pass,
                                        args=(passwords, file, possible_com))
                            self.threads.append(t)
                            self.process_count += 1
                            t.start()
                        else:
                            self.process_lock.release()
                    else:
                        continue
            for x in self.threads:
                x.join()
            self.delete_temporary_directory()
            self.end_time()

    def make_chars(self, char_type):
        chartype_list = char_type.split(",")
        chars = ""
        for chartype in chartype_list:
            if chartype == "lowercase":
                chars += string.ascii_lowercase
            elif chartype == "uppercase":
                chars += string.ascii_uppercase
            elif chartype == "letters":
                chars += string.ascii_letters
            elif chartype == "digits":
                chars += string.digits
            elif chartype == "symbols":
                chars += string.punctuation
            elif chartype == "space":
                chars += " "
            else:
                return False
        return chars

    def banner(self):
        term.clear()
        term.pos(1, 1)
        # check if font "epic" exists on this system
        # sudo wget http://www.figlet.org/fonts/epic.flf -O /usr/share/figlet/epic.flf
        bannerfont = "epic" if os.path.exists(
            '/usr/share/figlet/epic.flf') else "banner"
        banner = pyfiglet.figlet_format("ZYDRA", font=bannerfont).replace(
            "\n", "\n\t\t", 7)

        cprint("\r\n\t" + "@" * 61, "blue", end="")
        cprint("\n\t\t" + banner + "\t\tAuthor : Hamed Hosseini",
               "blue",
               attrs=['bold'])
        cprint("\t" + "@" * 61 + "\n", "blue")

    def end_time(self):
        self.stop = True
        end_time_show = time.asctime()
        end_time = time.monotonic()
        execution_time = (timedelta(seconds=end_time - self.start_time))
        print(self.blue("End time ==> ") + self.white(end_time_show))
        print(
            self.blue("Execution time ==> ") +
            self.white(str(execution_time)) + "\n")
        term.saveCursor()
        term.pos(7, 15)
        term.writeLine("ok", term.green, term.blink)
        term.restoreCursor()
        exit(0)

    def main(self):
        start_time_show = time.asctime()
        usage = "%prog [options] [args]" \
                "\n\nDictionary Mode:" \
                "\n   %prog -f <file> -d <wordlist>" \
                "\n\nBrute force Mode:" \
                "\n   %prog -f <file> -b <char_type> -m <min_length> -x <max_length>" \
                "\n\n   Available char_type:" \
                "\n\t<lowercase>  The lowercase letters abcdefghijklmnopqrstuvwxyz" \
                "\n\t<uppercase>  The uppercase letters ABCDEFGHIJKLMNOPQRSTUVWXYZ" \
                "\n\t<letters>    The concatenation of the lowercase and uppercase"\
                "\n\t<digits>     numbers 0123456789" \
                "\n\t<symbols>    punctuation characters !#$%&'()*+,-./:;<=>?@[\]^_`{|}~'" + '"' \
                "\n\t<space>      space character" \
                "\n   You can select multiple character types." \
                "\n\tExample: %prog -f <file> -b <space,digits> -m 1 -x 8"

        parser = optparse.OptionParser(usage)
        parser.add_option("-d",
                          dest="dictfile",
                          type='string',
                          help="Specifies dictionary file")
        parser.add_option("-f",
                          dest="file",
                          type='string',
                          help="Specifies the file")
        parser.add_option("-b",
                          dest="chartype",
                          type='string',
                          help="Specifies the character type")
        parser.add_option("-m",
                          dest="minlength",
                          type='string',
                          help="Specifies minimum length of password")
        parser.add_option("-x",
                          dest="maxlength",
                          type='string',
                          help="Specifies maximum length of password")

        (options, args) = parser.parse_args()
        try:
            if options.file:
                if os.path.isfile(options.file):
                    file = os.path.abspath(options.file)
                    if options.dictfile:
                        if os.path.isfile(options.dictfile):
                            dictfile = os.path.abspath(options.dictfile)
                            print(
                                self.blue("Start time ==> ") +
                                self.white(start_time_show) + "\n")
                            self.dict_guess_password(dictfile, file)
                        else:
                            parser.error(" " + options.dictfile +
                                         " dictionary file does not exist")
                            exit(0)
                    elif options.chartype:
                        chars = self.make_chars(options.chartype)
                        if chars is False:
                            parser.error(
                                " " + options.chartype +
                                " character type is not valid, Use --help for more info"
                            )
                        if options.minlength is None:
                            parser.error(" Enter minimum length of password")
                            exit(0)
                        if options.maxlength is None:
                            parser.error(" Enter maximum length of password")
                            exit(0)
                        if int(options.minlength) > int(options.maxlength):
                            parser.error(
                                " Min and Max must be numbers and Min must be \nless than Max or be the same"
                                ", Use --help for more info")
                            exit(0)
                        else:
                            print(
                                self.blue("Start time ==> ") +
                                self.white(start_time_show) + "\n")
                            self.bruteforce_guess_password(
                                chars, options.minlength, options.maxlength,
                                file)

                    else:
                        parser.error(
                            " Choose a wordlist or bruteforce method, Use --help for more info"
                        )
                        exit(0)
                else:
                    parser.error("" + options.file + " file does not exist")
                    exit(0)
            else:
                parser.error(" Choose a file, Use --help for more info")
                exit(0)

        except KeyboardInterrupt:
            time.sleep(1)
            self.delete_temporary_directory()
            print(
                self.red("\n\n [-] Detected CTRL+C") +
                self.white("\n closing app...\n Finish\n"))
            # self.end_time()
            exit(0)
Example #9
class Pipeline(ABC, Process, Friendable):
    """
  A superclass for OpenCV lane detection

  It handles opening and closing a video feed and subclasses simply implement the lane detection algorithm in their
  _run() method. Additionally, provides tools to visualize the steps of a lane detection pipeline and for the user to
  manually apply a mask to an image.

  Use:
    start() to open video feed and calls _run()
    _run() is where the lane detection algorithm is implemented (MUST be overridden by subclass)
    stop() to close video feed and windows and stop calling run()

    #TODO: update this list
    take_screenshot()
    _add_knot()


  # TODO: update this list
  :ivar _pipeline: a list of frames showing the different steps of the pipeline. It should only ever store the pipeline
                for a single iteration at a given instant (i.e. its length should never exceed the number of steps in
                the pipeline) - not guaranteed to be filled
  :ivar _show_pipeline: a flag indicating whether or not each step in the pipeline should be shown
  :ivar _debug: a flag indicating whether or not the user is debugging the pipeline. In debug, the pipeline is shown and
                debug statements are enabled
  :ivar _capture: the OpenCV capture object (CvCapture) that the lane detection algorithm should run on
  :ivar _fps: the fps of the capture object that the lane detection algorithm is run on
  :ivar _name: the name of the pipeline (derived from the class name)
  :ivar _screen: the image where the pipeline steps are drawn
  :ivar _image_mask_enabled: indicates whether or not the current instance of the pipeline supports image masks
  :ivar _region_of_interest_mask: stores the region of interest mask (will be empty if image mask is disabled)

  :ivar __stop: indicates whether or not the pipeline is stopped
  :ivar __source: the source that the pipeline is being run on
  :ivar __show_pipeline_steps: indicates whether or not the intermediate steps in the pipeline should be shown or just
                the final step
  :ivar __cached_pipeline: stores the last snapshot of the pipeline since it was cleared (since the current one is not
                guaranteed to be filled)
  :ivar __paused: indicates whether or not the pipeline is currently paused
  :ivar __while_paused: stores the function to be executed while the pipeline is paused

  :friend pipeline.utils.Visualizer
  """
    def __init__(self,
                 source: str,
                 *,
                 n_consumers: int = 0,
                 should_start: bool = True,
                 show_pipeline: bool = True,
                 image_mask_enabled: bool = True,
                 debug: bool = False):
        """
    Declares instance variables (_show_pipeline, _debug, _capture) and starts the pipeline according to should_start

    :param source: the filename or device that the pipeline should be run on
    :param should_start: a flag indicating whether or not the pipeline should start as soon as it is instantiated
    :param show_pipeline: a flag indicating whether or not each step in the pipeline should be shown
    :param debug: a flag indicating whether or not the user is debugging the pipeline. In debug, the pipeline is
                  shown and debug statements are enabled
    """

        # call superclass constructor
        super().__init__()

        # initialize instance variables

        # private - use property accessor
        self._source = source
        self._frame = None
        self._name = self.__class__.__name__
        self._fps = None
        self._image_mask_enabled = image_mask_enabled
        self._debug = debug
        self._show_pipeline = show_pipeline or self._debug
        self._show_pipeline_steps = settings.display.show_pipeline_steps
        self._knots = []

        screen_dimensions = (settings.window.height, settings.window.width,
                             NUM_IMAGE_CHANNELS)
        # protected
        self._screen = numpy.zeros(screen_dimensions, numpy.uint8)
        self._visualizer = None
        self._region_of_interest = None
        self._capture = None

        self._n_consumers = n_consumers  # the number of objects consuming the result of this pipeline
        if self._n_consumers > 0:
            # stores the currently detected lane - must be a queue as the detected result is accessed from a separate process
            self._lanes_queue = Queue()
            self.__n_consuming = 0  # tracks the number of consumers currently 'consuming' the detected result
            self.__n_consuming_mutex = Lock(
            )  # blocks access to self.__n_consuming
            # used to block pipeline from running while consumers are
            self.__consumer_semaphore = BoundedSemaphore(1)
            # used to prevent multiple iterations of pipeline from running before being consumed
            self.__producer_semaphore = BoundedSemaphore(1)

        # private - only accessible by class
        self.__current_knots = [
        ]  # may not be filled (most likely, will be partially filled)
        self.__stop = False
        self.__paused = False
        self.__while_paused = None

        # check if the pipeline should start immediately
        if should_start and not self.is_alive():
            self.start()

##### Property Accessors for Read-Only Instance Variables #####

    @property
    def source(self) -> str:
        return self._source

    @property
    def frame(self) -> numpy.array:
        return self._frame

    @property
    def name(self) -> str:
        return self._name

    @property
    def fps(self) -> int:
        return self._fps

    @property
    def image_mask_enabled(self) -> bool:
        return self._image_mask_enabled

    @property
    def knots(self) -> list[numpy.array]:
        return self.__current_knots

    @property
    def region_of_interest(self) -> list[tuple[int, int]]:
        return self._region_of_interest.get()

##### Property Accessors and Mutators for Instance Variables #####

    @property
    def show_pipeline(self) -> bool:
        return self._show_pipeline

    @show_pipeline.setter
    def show_pipeline(self, value: bool) -> None:
        self._show_pipeline = value

    @property
    def show_pipeline_steps(self) -> bool:
        return self._show_pipeline_steps

    @show_pipeline_steps.setter
    def show_pipeline_steps(self, value: bool) -> None:
        self._show_pipeline_steps = value

    @property
    def debug(self) -> bool:
        return self._debug

    @debug.setter
    def debug(self, value: bool) -> None:
        self._debug = value


##### Method Definitions #####

    def start_consumption(self):
        if self._n_consumers == 0:
            raise RuntimeError(
                'Cannot read lanes as the number of configured consumers is',
                self._n_consumers)

        # read the lanes from the queue
        # this also blocks until the pipeline finishes executing on the current frame
        lanes = self._lanes_queue.get(block=True)

        # prevent pipeline from running while consumer is running
        with self.__n_consuming_mutex:
            if self.__n_consuming == 0:
                self.__consumer_semaphore.acquire(
                )  # first consumer acquires semaphore
            self.__n_consuming += 1

        assert self._lanes_queue.empty(
        )  # ensure that the queue only ever contains the most recent detection
        return lanes

    def end_consumption(self):
        if self._n_consumers == 0:
            raise RuntimeError(
                'Cannot read lanes as the number of configured consumers is',
                self._n_consumers)

        # prevent pipeline from running while consumer is running
        with self.__n_consuming_mutex:
            if self.__n_consuming == self._n_consumers:
                self.__n_consuming = 0  # reset the number of consumers 'consuming'
                self.__consumer_semaphore.release(
                )  # last consumer releases semaphore
                self.__producer_semaphore.release(
                )  # allow pipeline to start on next iteration

    def _add_lanes(self, lanes):
        # only enqueue if we have a consumer configured; otherwise, fail silently
        # this allows the method to be called by subclasses without having to worry about consumers
        if self._n_consumers > 0:
            self._lanes_queue.put((self._frame, lanes), block=False)
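    # A hypothetical consumer loop built on the handshake above (MyPipeline
    # and steer() are illustrative, not part of this class):
    #
    #   pipeline = MyPipeline('road.mp4', n_consumers=1)
    #   while True:
    #       frame, lanes = pipeline.start_consumption()  # blocks for the next result
    #       if lanes is constants.SENTINEL:
    #           break
    #       steer(lanes)                # consume the detection
    #       pipeline.end_consumption()  # let the pipeline resume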

    def start(self):
        """
    Starts running the process which then subsequently opens the video and runs the pipeline

    :return: void
    """

        super().start()  # call Process::start() to start the Process execution

    def __open_source(self, src: str) -> None:
        """
    Opens a cv2.capture object

    This method is susceptible to raise any errors caused by cv2.VideoCapture(src)

    :param src: the filename or device id to be opened
    :raises: RuntimeError is raised if this method is called when _capture is already open
    :return: void
    """

        # check that capture is not already open
        if not self._capture:
            self._capture = cv2.VideoCapture(
                src)  # open capture from provided input
            self._fps = self._capture.get(cv2.CAP_PROP_FPS)  # get fps of video
        else:
            # throw error if capture is already open
            raise RuntimeError(
                'Cannot open {src} as a capture is already open'.format(
                    src=src))

    def run(self):
        """

    :return: void
    """

        # check if function was called by Process superclass and raise an error if it was not
        if inspect.stack()[1].function != '_bootstrap':
            raise RuntimeError(
                'pipeline::run can only be invoked by multiprocessing::Process'
            )

        self.__open_source(self._source)  # open input
        first_frame = True  # initialize a flag used to indicate if init_pipeline should be run

        # loop run() while the capture is open and we have not stopped running
        while not self.__stop and self._capture.isOpened():
            # if the pipeline is not paused, read a frame from the capture and call the pipeline
            if not self.is_paused():
                start_time = time.time()  # store start time of loop
                return_value, frame = self._capture.read(
                )  # read a frame of the capture

                # check that the next frame was read successfully
                # i.e. that we have not hit the end of the video or encountered an error
                if return_value:
                    self._frame = frame
                    # if it is the first frame of the pipeline, run init_pipeline, then set flag to false
                    if first_frame:
                        self._init_pipeline(self._frame)
                        first_frame = False

                    # prevent pipeline from running until consumers are done
                    if self._n_consumers > 0:
                        # wait until consumer is finished consuming
                        self.__producer_semaphore.acquire()
                        self.__producer_semaphore.release()
                        self.__consumer_semaphore.acquire(
                        )  # prevent consumers from running until after this iteration
                    self._run(self._frame)  # run the pipeline
                    if self._n_consumers > 0:
                        self.__consumer_semaphore.release(
                        )  # allow consumers to run on this iteration
                        # prevent pipeline from continuing until the current iteration is consumed
                        self.__producer_semaphore.acquire()

                    if self._show_pipeline:  # display the pipeline
                        # self.__display_pipeline()
                        self._visualizer.get()
                else:
                    self.stop(
                    )  # stop the pipeline if we hit the end of the video or encountered an error

                # only sleep if stop was not called (i.e. we will read the next frame)
                if not self.__stop:
                    # 1 second / fps = time to sleep for each frame subtract elapsed time
                    time_to_sleep = max(
                        1 / self._fps - (time.time() - start_time), 0)
                    time.sleep(time_to_sleep)
            # if the pipeline is paused and the whilepaused handler is defined, call it
            elif self.__while_paused is not None:
                self.__while_paused(
                )  # NOTE: the pipeline will block until the function returns

            keypress = cv2.waitKey(1) & 0xFF  # get the keypress
            # if a key was pressed, call the handler with the pressed key
            # (cv2.waitKey returns -1, i.e. 0xFF after masking, when no key was pressed)
            if keypress != 0xFF:
                self.__handle_keypress(keypress)

            # reset the pipeline now that the current iteration has finished
            self.__clear_pipeline()

        self._add_lanes(constants.SENTINEL)

    def __handle_keypress(self, keypress):
        """
    Handles actions based on a keypress. Works as a delegator to delegate actions based on specific keypress. If the
    keypress maps to a default action, that actions is invoked, otherwise the keypress is passed to the subclass.

    :param keypress: the code of the keypress
    :param frame: the current frame of the pipeline when the keypress occurred
    :return: void
    """

        # q - stop the pipeline
        if keypress == ord('q'):
            self.stop()
        # p - toggle displaying the pipeline steps
        elif keypress == ord('p'):
            self._show_pipeline_steps = not self._show_pipeline_steps
        # esc - reset the debug image (top right)
        elif keypress == ord('\x1b'):
            self._visualizer.reset_debug_image()
        # s - take a screenshot of the pipeline (saves each knot in the pipeline)
        elif keypress == ord('s'):
            self.take_screenshot()
        # m - allow user to edit mask of source image (if image mask is enabled)
        elif keypress == ord('m') and self._image_mask_enabled:
            self._region_of_interest.editor(self.frame)
        # other non-default case (let the subclass handle these cases if desired)
        else:
            self._handle_keypress(keypress)

    def _handle_keypress(self, keypress):
        """
    @Override - subclass CAN override this function (it is optional)
    Where subclass can add custom keypress events. Cannot override keypress events in pipeline.py. This inhibits the use
    of the 'q', 'p', and 's' keys and possibly the 'm' key, depending on the state of _image_mask_enabled.

    :param keypress: the code of the keypress (will never correspond to any of the keys used for default keypress events)
    :param frame: the current frame of the pipeline when the keypress occurred
    :return: void
    """

        pass

    def _init_pipeline(self, first_frame):
        """
    @Override - subclass CAN override this function (it is optional)
    Where subclass can do any required initialization prior to the pipeline beginning to run. Default action is to get
    the user to apply a mask to the image (if image mask is enabled, otherwise there is no default action).

    :param first_frame: the first frame of the pipeline
    :return: void
    """

        # check if image mask is enabled, if so check if a mask was already defined or get the user to define one
        if self._image_mask_enabled:
            self._region_of_interest = utils.RegionOfInterest(self)
            # assert that we loaded a region of interest mask
            assert self._region_of_interest.load()
            self._visualizer = utils.Visualizer(self)

    def is_paused(self):
        """
    Gets whether or not the pipeline is paused

    :return: boolean
    """

        return self.__paused

    def pause(self, whilepaused=None):
        """
    Pauses the pipeline and will call the whilepaused handler until the pipeline is unpaused

    :raises: RuntimeError is raised if this method is called when the pipeline is already paused
    :return: void
    """

        if self.is_paused():
            raise RuntimeError(
                'Cannot pause a pipeline that is already paused')
        self.__paused = True
        self.__while_paused = whilepaused

    def unpause(self):
        """
    Unpauses the pipeline

    :raises: RuntimeError is raised if this method is called when the pipeline is not paused
    :return: void
    """

        if not self.is_paused():
            raise RuntimeError(
                'Cannot unpause a pipeline that is not currently paused')
        self.__paused = False
        self.__while_paused = None

    def stop(self):
        """
    Closes _capture and all windows and stops looping run()

    :return: void
    """

        self._capture.release()  # close the capture
        cv2.destroyAllWindows()  # remove all windows
        self.__stop = True  # set flag to stop process execution

    def take_screenshot(self, extension='jpg'):
        """
    Takes a screenshot of the pipeline (saves each knot in the pipeline)

    :param extension (optional) (default=jpg): the file extension that images should be saved as
    :return: void
    """
        def do_screenshot() -> None:
            output_dir = '{base_output_dir}/{pipeline_name}/{timestamp}' \
              .format(base_output_dir='output',
                      pipeline_name=self._name,
                      timestamp=datetime.now().strftime('%Y-%m-%d_%H-%M-%S'))

            os.makedirs(output_dir, exist_ok=True)

            # iterate through each step of the pipeline and save the corresponding image
            for i, (name, image) in enumerate(self._knots, start=1):
                # format the file name
                file_name = '{index} - {name}.{ext}'.format(index=i,
                                                            name=name,
                                                            ext=extension)
                cv2.imwrite(os.path.join(output_dir, file_name), image)

        screenshot = Thread(target=do_screenshot)
        screenshot.start()
        # do not join the thread here since we want the screenshot to occur in the background

    def _add_knot(self, name, image):
        """
    Adds a knot to the lane detection pipeline

    :param name: the name of the image to be added
    :param image: the image to be added to the end of the pipeline
    :return: void
    """
        self.__current_knots.append((name, image))

    def __clear_pipeline(self):
        """
    Empties the stored steps of the lane detection pipeline

    :return: void
    """
        self._knots = self.__current_knots
        self.__current_knots = []

    def __display_pipeline(self):
        """
    Displays the pipeline to the user. Depending on the state of _show_pipeline_steps, the steps of the pipeline may
    or may not be visible.

    :return: void
    """
        def add_knot_to_screen(index, knot, new_dimension, position):
            """
      Displays a single knot in the pipeline

      :param index: the index of the knot in the pipeline
      :param knot: the knot in the pipeline to be displayed
      :param new_dimension: the desired size of the knot to be displayed - a tuple of the form (width, height)
      :param position: the position of the top left corner of the image on self._screen - a tuple of the form (y, x)
      :return: void
      """

            # destructure the knot
            name, image = knot
            # resize the image to the desired size
            resized_image = cv2.resize(image, dsize=new_dimension)
            # add the image to the screen at the specified location
            start_y, start_x = position
            width, height = new_dimension
            self._screen[start_y:(start_y + height),
                         start_x:(start_x + width)] = resized_image

            # add the title of the knot to the image
            title = '{index}  -  {name}'.format(index=index, name=name)
            title_bounding_box, title_baseline = cv2.getTextSize(
                title, settings.font.face, settings.font.scale,
                settings.font.thickness)
            text_width, text_height = title_bounding_box
            position = (start_x + settings.font.edge_offset,
                        start_y + text_height + settings.font.edge_offset)
            cv2.putText(self._screen, title, position, settings.font.face,
                        settings.font.scale, settings.font.color,
                        settings.font.thickness)

        # split the pipeline into the final and intermediate steps
        pipeline_steps = self.__current_knots[:-1]
        final_step = self.__current_knots[-1]
        num_pipeline_steps = len(pipeline_steps)

        # display the steps of the pipeline only if that option is selected
        if self._show_pipeline_steps and num_pipeline_steps > 0:
            # initialize the aspect ratio (gets set later when the pipeline is checked for consistent aspect ratios)
            aspect_ratio = None
            # check that all steps of the pipeline have the same aspect ratio (if not, raise an error)
            # simultaneously, check if any images are single channel and convert them to the correct number of channels
            for i in range(len(self.__current_knots)):
                name, image = self.__current_knots[i]
                # get the dimensions of the image
                # note that if the image is single channel, num_channels will unpack as an empty list -> default it to 1 below
                height, width, *num_channels = image.shape
                num_channels = num_channels[0] if num_channels else 1

                # check for aspect ratio consistency throughout the pipeline
                if aspect_ratio is None:
                    aspect_ratio = height / width
                elif height / width != aspect_ratio:
                    raise RuntimeError(
                        'aspect ratio of images is not consistent throughout pipeline'
                    )

                # if the image is single channel (grayscale), convert it to 3 channels (still grayscale)
                # this allows the images to be merged into one
                if num_channels == 1:
                    temp_image = numpy.empty(
                        (height, width, Pipeline.NUM_IMAGE_CHANNELS))
                    for channel in range(Pipeline.NUM_IMAGE_CHANNELS):
                        temp_image[:, :, channel] = image
                    if i < num_pipeline_steps:
                        pipeline_steps[i] = (name, temp_image)
                    else:
                        final_step = (name, temp_image)

            # returns the smallest perfect square greater than or equal to num
            next_square = lambda num: int(
                round(math.pow(math.ceil(math.sqrt(abs(num))), 2)))

            # the actual ratio of the final image (will be greater than or equal to settings.display.minimum_final_image_ratio)
            RESULT_IMAGE_RATIO = settings.display.minimum_final_image_ratio
            # initialize variables concerned with the size of pipeline step bins
            # (will get set later when calculating RESULT_IMAGE_RATIO)
            num_bins_top_left = None
            horizontal_bins_dimension = None
            vertical_bins_dimension = None

            # mimic a do-while loop
            while True:

                def calculate_dimensions_given_ratio(ratio):
                    """
          Calculates pipeline step bin dimensions given a ratio for the final step of the pipeline

          :param ratio: the ratio of the final step of the pipeline to the rest of the screen
          :return: void
          """

                    # allow this function to modify specific variables in outer scope
                    nonlocal num_bins_top_left, horizontal_bins_dimension, vertical_bins_dimension
                    # do the bin calculations
                    num_bins_top_left = next_square(
                        math.ceil(num_pipeline_steps * (1 - ratio)))
                    horizontal_bins_dimension = int(
                        round(math.sqrt(num_bins_top_left)))
                    vertical_bins_dimension = math.pow(
                        1 - ratio, -1) * horizontal_bins_dimension

                # calculate the bin dimensions for the current ratio
                calculate_dimensions_given_ratio(RESULT_IMAGE_RATIO)
                # if the number of vertical bins is an integer (divides evenly into the screen), then break the loop
                # (the while condition of the do-while loop)
                if vertical_bins_dimension.is_integer():
                    break
                # store the previously calculated ratio
                prev = RESULT_IMAGE_RATIO
                # calculate the new ratio to use
                RESULT_IMAGE_RATIO = 1 - horizontal_bins_dimension / math.ceil(
                    vertical_bins_dimension)
                # due to floating point precision errors, sometimes repeating decimals get rounded in an undesirable manner
                # essentially, the program has successfully found the desired ratio, but rounds it causing the program to fail
                # if this occurs, raise an error and instruct the user to fix the rounding error and update the value in
                # pipeline settings
                if prev == RESULT_IMAGE_RATIO:
                    raise FloatingPointError(
                        'Failed trying to find best ratio for result image. This was caused by a floating point decimal error on repeating digits. Update the pipeline.config file and try again. The recommended ratio is {new_ratio} (simply fix the repeating decimals)'
                        .format(new_ratio=RESULT_IMAGE_RATIO))

            # calculate the dimensions of a pipeline step
            container_width = int(
                round(settings.window.width * (1 - RESULT_IMAGE_RATIO)))
            step_width = container_width // horizontal_bins_dimension
            step_height = int(round(step_width * aspect_ratio))

            # iterate through all but the final step and display those knots in the pipeline
            for i, (name, image) in enumerate(pipeline_steps):
                # add the knot to the screen at the correct position
                start_y = step_height * (i // horizontal_bins_dimension)
                start_x = step_width * (i % horizontal_bins_dimension)
                add_knot_to_screen(i + 1,
                                   knot=(name, image),
                                   new_dimension=(step_width, step_height),
                                   position=(start_y, start_x))

            # add the final step to the screen in the bottom left quarter
            output_width = int(
                round(settings.window.width * RESULT_IMAGE_RATIO))
            output_height = int(
                round(settings.window.height * RESULT_IMAGE_RATIO))
            add_knot_to_screen(
                len(self.__current_knots),
                knot=final_step,
                new_dimension=(output_width, output_height),
                position=(settings.window.height - output_height,
                          settings.window.width - output_width))

            cv2.imshow(self._name, self._screen)
        else:
            name, image = final_step
            cv2.imshow(self._name, image)

    @abstractmethod
    def _run(self, frame):
        """
    @Override - subclass MUST override this function
    Where the lane detection algorithm is implemented; it is called on each frame of _capture.

    :param frame: the frame of the capture that the pipeline should be run on
    :return: void
    """

        pass
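The run() loop above uses a pair of semaphores to advance the producer and its consumers in lock-step: the producer publishes a frame, lets the consumers read it, then blocks until they are done. Below is a minimal sketch of that rendezvous pattern (hypothetical names, and plain Semaphores rather than the pipeline's bounded pair):

from threading import Semaphore, Thread

frame_ready = Semaphore(0)  # signaled by the producer when a frame is published
frame_done = Semaphore(0)   # signaled by the consumer when it has finished

def consumer():
    for _ in range(3):
        frame_ready.acquire()  # wait for the producer to publish a frame
        print('consuming frame')
        frame_done.release()   # tell the producer this iteration is consumed

Thread(target=consumer).start()
for frame in range(3):         # stand-in for the capture.read() loop
    print('producing frame', frame)
    frame_ready.release()      # publish the frame to the consumer
    frame_done.acquire()       # block until the frame has been consumed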
Example #10
class scheduler(object):
    """multi-process scheduler"""

    def __init__(self):
        # if any jobs are marked in the run state when the scheduler starts,
        # replace their state with 'X' to mark that they have been shut down
        db = DAL(config.uri, auto_import=True, migrate=False, 
                 folder=config.dbdir)
        myset = db(db.jobs.state == 'R')
        myset.update(state='X')
        db.commit()
        self.sem = BoundedSemaphore(config.np) 
        self.mutex = Lock()

    def poll(self):
        """start polling thread which checks queue status every second"""
        t = threading.Thread(target = self.assignTask)
        t.start()

    def assignTask(self):
        while(True):
            #print "scheduler:", self.qstat(), "jobs in queued state", 
            #time.asctime()
            j = self.qfront()
            if j is not None and j > 0:
                self.start(j)            
            time.sleep(1) 

    def qsub(self,app,cid,user,np,pry,desc=""):
        """queue job ... really just set state to 'Q'."""
        db = DAL(config.uri, auto_import=True, migrate=False, 
                 folder=config.dbdir)
        jid = db.jobs.insert(user=user, app=app, cid=cid, state=STATE_QUEUED, 
                              description=desc, time_submit=time.asctime(), np=np, priority=pry)
        db.commit()
        db.close()
        return str(jid)

    def qfront(self):
        """pop the top job off of the queue that is in a queued 'Q' state"""
        db = DAL(config.uri, auto_import=True, migrate=False, 
                 folder=config.dbdir)
        myorder = db.jobs.priority 
        #myorder = db.jobs.priority | db.jobs.id
        row = db(db.jobs.state==STATE_QUEUED).select(orderby=myorder).first()
        db.close()
        if row: return row.id
        else: return None

    def qdel(self,jid):
        """delete job jid from the queue"""
        db = DAL(config.uri, auto_import=True, migrate=False, 
                 folder=config.dbdir)
        del db.jobs[jid]
        db.commit()
        db.close()

    def qstat(self):
        """return the number of jobs in a queued 'Q' state"""
        db = DAL(config.uri, auto_import=True, migrate=False, 
                 folder=config.dbdir)
        count = db(db.jobs.state==STATE_QUEUED).count()
        db.close()
        return count

    def start(self,jid):
        """start running a job by creating a new process"""
        global p, jobs
        myjobs = []
        db = DAL(config.uri, auto_import=True, migrate=False, 
                 folder=config.dbdir)
        user = db.jobs(jid).user
        app = db.jobs(jid).app
        cid = db.jobs(jid).cid
        np = db.jobs(jid).np
        if np > 1: # use mpi
            command = db.apps(name=app).command
            command = config.mpirun + " -np " + str(np) + " " + command
        else: # don't use mpi
            command = db.apps(name=app).command

        exe = os.path.join(config.apps_dir,app,app)
        outfn = app + ".out"
        cmd = command + ' >& ' + outfn

        run_dir = os.path.join(config.user_dir,user,app,cid)

        # if the requested number of procs is available, fork a new process with the command
        for i in range(np):
            self.sem.acquire()
        p = Process(target=self.start_job, args=(run_dir,cmd,app,jid,np,))
        myjobs.append(p)
        print(len(myjobs), myjobs)
        p.start()
        for i in range(np):
            self.sem.release()

    def start_job(self,run_dir,cmd,app,jid,np):
        """this is what the separate job process runs"""
        global popen
        #print '*** pid:', os.getpid()
        for i in range(np):
            self.sem.acquire()
        # update state to 'R' for run
        self._set_state(jid,STATE_RUN)
        mycwd = os.getcwd()
        os.chdir(run_dir) # change to case directory
        
        #popen = subprocess.Popen(cmd, stdout=subprocess.PIPE)
        #subprocess.call(cmd, shell=True)
        popen = subprocess.Popen(cmd, shell=True)
        #os.system(cmd)

        # let user know job has ended
        outfn = app + ".out"
        with open(outfn,"a") as f:
            f.write("FINISHED EXECUTION")
        # update state to 'C' for completed
        os.chdir(mycwd) # return to SciPaaS root directory
        self._set_state(jid,STATE_COMPLETED)
        for i in range(np):
            self.sem.release()

    def _set_state(self,jid,state):
        """update state of job"""
        self.mutex.acquire()
        db = DAL(config.uri, auto_import=True, migrate=False, 
                 folder=config.dbdir)
        db.jobs[jid] = dict(state=state)
        db.commit()
        db.close()
        self.mutex.release()

    def stop(self,app):
        popen.terminate()
        time.sleep(0.1)

    def test_qfront(self):
        print(self.qfront())
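Example #10 throttles jobs by acquiring the scheduler's BoundedSemaphore once per requested processor, so a job needing np slots waits until that many are free. Below is a minimal, self-contained sketch of that slot-counting idea (hypothetical names; threads stand in for the scheduler's processes):

from threading import BoundedSemaphore, Thread
import time

TOTAL_SLOTS = 4                      # stand-in for config.np
slots = BoundedSemaphore(TOTAL_SLOTS)

def run_job(name, np):
    for _ in range(np):
        slots.acquire()              # claim one slot per requested processor
    try:
        print(name, 'running on', np, 'slots')
        time.sleep(0.1)              # stand-in for the actual subprocess
    finally:
        for _ in range(np):
            slots.release()          # free the slots for queued jobs

jobs = [Thread(target=run_job, args=('job%d' % i, 2)) for i in range(3)]
for j in jobs: j.start()
for j in jobs: j.join()

One caveat worth noting: a threading.BoundedSemaphore is per-process, so if the scheduler's self.sem comes from threading, the copy acquired inside the forked start_job process is independent of the parent's; sharing the slot count across processes would require a multiprocessing.BoundedSemaphore.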
Example #11
File: queues.py Project: ogrisel/loky
class Queue(object):
    def __init__(self, maxsize=0):
        if maxsize <= 0:
            # Can raise ImportError (see issues #3770 and #23400)
            from multiprocessing.synchronize import SEM_VALUE_MAX as maxsize
        self._maxsize = maxsize
        self._reader, self._writer = connection.Pipe(duplex=False)
        self._rlock = Lock()
        self._opid = os.getpid()
        if sys.platform == 'win32':
            self._wlock = None
        else:
            self._wlock = Lock()
        self._sem = BoundedSemaphore(maxsize)
        # For use by concurrent.futures
        self._ignore_epipe = False

        self._after_fork()

        if sys.platform != 'win32':
            register_after_fork(self, Queue._after_fork)

    def __getstate__(self):
        assert is_spawning()
        return (self._ignore_epipe, self._maxsize, self._reader, self._writer,
                self._rlock, self._wlock, self._sem, self._opid)

    def __setstate__(self, state):
        (self._ignore_epipe, self._maxsize, self._reader, self._writer,
         self._rlock, self._wlock, self._sem, self._opid) = state
        self._after_fork()

    def _after_fork(self):
        debug('Queue._after_fork()')
        self._notempty = threading.Condition(threading.Lock())
        self._buffer = collections.deque()
        self._thread = None
        self._jointhread = None
        self._joincancelled = False
        self._closed = False
        self._close = None
        self._send_bytes = self._writer.send_bytes
        self._recv_bytes = self._reader.recv_bytes
        self._poll = self._reader.poll

    def put(self, obj, block=True, timeout=None):
        assert not self._closed
        if not self._sem.acquire(block, timeout):
            raise Full

        with self._notempty:
            if self._thread is None:
                self._start_thread()
            self._buffer.append(obj)
            self._notempty.notify()

    def get(self, block=True, timeout=None):
        if block and timeout is None:
            with self._rlock:
                res = self._recv_bytes()
            self._sem.release()
        else:
            if block:
                deadline = time.time() + timeout
            if not self._rlock.acquire(block, timeout):
                raise Empty
            try:
                if block:
                    timeout = deadline - time.time()
                    if timeout < 0 or not self._poll(timeout):
                        raise Empty
                elif not self._poll():
                    raise Empty
                res = self._recv_bytes()
                self._sem.release()
            finally:
                self._rlock.release()
        # unserialize the data after having released the lock
        return LokyPickler.loads(res)

    def qsize(self):
        # Raises NotImplementedError on Mac OSX because of broken sem
        # getvalue()
        return self._maxsize - self._sem._semlock._get_value()

    def empty(self):
        return not self._poll()

    def full(self):
        return self._sem._semlock._is_zero()

    def get_nowait(self):
        return self.get(False)

    def put_nowait(self, obj):
        return self.put(obj, False)

    def close(self):
        self._closed = True
        try:
            self._reader.close()
        finally:
            close = self._close
            if close:
                self._close = None
                close()

    def join_thread(self):
        debug('Queue.join_thread()')
        assert self._closed
        if self._jointhread:
            self._jointhread()

    def cancel_join_thread(self):
        debug('Queue.cancel_join_thread()')
        self._joincancelled = True
        try:
            self._jointhread.cancel()
        except AttributeError:
            pass

    def _start_thread(self):
        debug('Queue._start_thread()')

        # Start thread which transfers data from buffer to pipe
        self._buffer.clear()
        self._thread = threading.Thread(
            target=Queue._feed,
            args=(self._buffer, self._notempty, self._send_bytes, self._wlock,
                  self._writer.close, self._ignore_epipe),
            name='QueueFeederThread')
        self._thread.daemon = True

        debug('doing self._thread.start()')
        self._thread.start()
        debug('... done self._thread.start()')

        # On process exit we will wait for data to be flushed to pipe.
        #
        # However, if this process created the queue then all
        # processes which use the queue will be descendants of this
        # process.  Therefore waiting for the queue to be flushed
        # is pointless once all the child processes have been joined.
        created_by_this_process = (self._opid == os.getpid())
        if not self._joincancelled and not created_by_this_process:
            self._jointhread = Finalize(self._thread,
                                        Queue._finalize_join,
                                        [weakref.ref(self._thread)],
                                        exitpriority=-5)

        # Send sentinel to the thread queue object when garbage collected
        self._close = Finalize(self,
                               Queue._finalize_close,
                               [self._buffer, self._notempty],
                               exitpriority=10)

    @staticmethod
    def _finalize_join(twr):
        debug('joining queue thread')
        thread = twr()
        if thread is not None:
            thread.join()
            debug('... queue thread joined')
        else:
            debug('... queue thread already dead')

    @staticmethod
    def _finalize_close(buffer, notempty):
        debug('telling queue thread to quit')
        with notempty:
            buffer.append(_sentinel)
            notempty.notify()

    @staticmethod
    def _feed(buffer, notempty, send_bytes, writelock, close, ignore_epipe):
        debug('starting thread to feed data to pipe')
        nacquire = notempty.acquire
        nrelease = notempty.release
        nwait = notempty.wait
        bpopleft = buffer.popleft
        sentinel = _sentinel
        if sys.platform != 'win32':
            wacquire = writelock.acquire
            wrelease = writelock.release
        else:
            wacquire = None

        try:
            while 1:
                nacquire()
                try:
                    if not buffer:
                        nwait()
                finally:
                    nrelease()
                try:
                    while 1:
                        obj = bpopleft()
                        if obj is sentinel:
                            debug('feeder thread got sentinel -- exiting')
                            close()
                            return

                        # serialize the data before acquiring the lock
                        obj = LokyPickler.dumps(obj)
                        if wacquire is None:
                            send_bytes(obj)
                        else:
                            wacquire()
                            try:
                                send_bytes(obj)
                            finally:
                                wrelease()
                except IndexError:
                    pass
        except Exception as e:
            if ignore_epipe and getattr(e, 'errno', 0) == errno.EPIPE:
                return
            # Since this runs in a daemon thread the resources it uses
            # may become unusable while the process is cleaning up.
            # We ignore errors which happen after the process has
            # started to clean up.
            try:
                if is_exiting():
                    info('error in queue thread: %s', e)
                else:
                    import traceback
                    traceback.print_exc()
            except Exception:
                pass
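In the Queue above, the BoundedSemaphore counts free capacity: put() acquires a slot (raising Full if none frees up within the timeout) and get() releases one after reading. A minimal usage sketch follows; the standard multiprocessing.Queue, which this loky class mirrors, follows the same semaphore-bounded design:

from multiprocessing import Process, Queue

def worker(q):
    q.put('result')     # acquires the capacity semaphore (blocks when full)

if __name__ == '__main__':
    q = Queue(maxsize=2)
    p = Process(target=worker, args=(q,))
    p.start()
    print(q.get())      # releases the capacity semaphore, freeing a slot
    p.join()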
Example #12
File: scheduler.py Project: jpotterm/spc
class Scheduler(object):
    """multi-process scheduler"""

    def __init__(self):
        # if any jobs are marked in the run state when the scheduler starts,
        # replace their state with STATE_STOPPED to mark that they have been shut down
        db = DAL(config.uri, auto_import=True, migrate=False, folder=config.dbdir)
        myset = db(db.jobs.state == STATE_RUN)
        myset.update(state=STATE_STOPPED)
        db.commit()
        self.sem = BoundedSemaphore(config.np)
        self.mutex = Lock()
        # set time zone
        try:
            os.environ['TZ'] = config.time_zone
            time.tzset()
        except Exception: pass

    def poll(self):
        """start polling thread which checks queue status every second"""
        t = threading.Thread(target = self.assignTask)
        t.daemon = True
        t.start()

    def assignTask(self):
        global myjobs
        manager = Manager()
        myjobs = manager.dict()
        while(True):
            self.stop_expired_jobs()
            j = self.qfront()
            if j is not None and j > 0:
                self.start(j)
            time.sleep(1)

    def qsub(self, app, cid, uid, cmd, np, pry, walltime, desc=""):
        """queue job ... really just set state to 'Q'."""
        db = DAL(config.uri, auto_import=True, migrate=False,
                 folder=config.dbdir)
        jid = db.jobs.insert(uid=uid, app=app, cid=cid, command=cmd, state=STATE_QUEUED,
                              description=desc, time_submit=time.asctime(),
                              walltime=walltime, np=np, priority=pry)
        db.commit()
        db.close()
        return str(jid)

    def qfront(self):
        """pop the top job off of the queue that is in a queued 'Q' state"""
        db = DAL(config.uri, auto_import=True, migrate=False, folder=config.dbdir)
        myorder = db.jobs.priority
        #myorder = db.jobs.priority | db.jobs.id
        row = db(db.jobs.state==STATE_QUEUED).select(orderby=myorder).first()
        db.close()
        if row: return row.id
        else: return None

    def qdel(self,jid):
        """delete job jid from the queue"""
        db = DAL(config.uri, auto_import=True, migrate=False, folder=config.dbdir)
        del db.jobs[jid]
        db.commit()
        db.close()

    def qstat(self):
        """return the number of jobs in a queued 'Q' state"""
        db = DAL(config.uri, auto_import=True, migrate=False,
                 folder=config.dbdir)
        count = db(db.jobs.state==STATE_QUEUED).count()
        db.close()
        return count

    def start(self,jid):
        """start running a job by creating a new process"""
        db = DAL(config.uri, auto_import=True, migrate=False, folder=config.dbdir)
        uid = db.jobs(jid).uid
        user = db.users(uid).user
        app = db.jobs(jid).app
        cid = db.jobs(jid).cid
        np = db.jobs(jid).np
        if np > 1: # use mpi
            command = db.jobs(jid).command
            command = config.mpirun + " -np " + str(np) + " " + command
        else: # don't use mpi
            command = db.jobs(jid).command

        # redirect output to appname.out file
        outfn = app + ".out"
        cmd = command + ' > ' + outfn + ' 2>&1 '
        print "cmd:", cmd

        run_dir = os.path.join(user_dir, user, app, cid)

        # if the requested number of procs is available, fork a new process with the command
        for i in range(np):
            self.sem.acquire()
        p = Process(target=self.start_job, args=(run_dir,cmd,app,jid,np,myjobs))
        p.start()
        for i in range(np):
            self.sem.release()

    def start_job(self,run_dir,cmd,app,jid,np,myjobs):
        """this is what the separate job process runs"""
        for i in range(np): self.sem.acquire()
        # update state to 'R' for run
        self._set_state(jid,STATE_RUN)
        mycwd = os.getcwd()
        os.chdir(run_dir) # change to case directory

        pro = subprocess.Popen(cmd, stdout=subprocess.PIPE, shell=True, preexec_fn=os.setsid)
        myjobs[jid] = pro

        pro.wait() # wait for job to finish
        myjobs.pop(int(jid), None)  # remove job from buffer

        # let user know job has ended
        outfn = app + ".out"
        with open(outfn,"a") as f:
            f.write("FINISHED EXECUTION")

        # update state to 'C' for completed
        os.chdir(mycwd)
        self._set_state(jid,STATE_COMPLETED)
        for i in range(np):
            self.sem.release()

    def _set_state(self,jid,state):
        """update state of job"""
        self.mutex.acquire()
        db = DAL(config.uri, auto_import=True, migrate=False, folder=config.dbdir)
        db.jobs[jid] = dict(state=state)
        db.commit()
        db.close()
        self.mutex.release()

    def stop_expired_jobs(self):
        """shutdown jobs that exceed their time limit"""
        db = DAL(config.uri, auto_import=True, migrate=False, folder=config.dbdir)
        rows = db(db.jobs.state==STATE_RUN).select()
        for row in rows:
            if row:
                walltime = int(row.walltime)
                time_submit = time.mktime(datetime.datetime.strptime(
                              row.time_submit, "%a %b %d %H:%M:%S %Y").timetuple())
                now = time.mktime(datetime.datetime.now().timetuple())
                runtime = now - time_submit
                if runtime > walltime:
                    print "INFO: scheduler stopped job", row.id, "REASON: reached timeout"
                    self.stop(row.id)

        db.close()

    def stop(self,jid):
        p = myjobs.pop(int(jid), None)
        if p: os.killpg(os.getpgid(p.pid), signal.SIGTERM)

        # the following doesn't work because it gets overwritten by the
        # _set_state(jid, STATE_COMPLETED) call at the end of start_job;
        # need a way to feed back to start_job whether the job has been stopped or not
        # db = DAL(config.uri, auto_import=True, migrate=False, folder=config.dbdir)
        # myset = db(db.jobs.id == jid)
        # myset.update(state=STATE_STOPPED)
        # db.commit()
        # db.close()

    def test_qfront(self):
        print(self.qfront())
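Example #12's stop() works because start_job() launches the shell with preexec_fn=os.setsid, which places the command and everything it spawns in a fresh process group; os.killpg can then signal the whole group at once. A minimal POSIX-only sketch of that pattern:

import os
import signal
import subprocess
import time

# start the command in its own session/process group
pro = subprocess.Popen('sleep 30', shell=True, preexec_fn=os.setsid)
time.sleep(0.1)
os.killpg(os.getpgid(pro.pid), signal.SIGTERM)  # terminate the whole group
pro.wait()
print('exit code:', pro.returncode)             # negative => killed by a signal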