def search_c_parallel(psp, min_xy=3, max_xy=10, min_c=3, max_c=int_ceiling, min_z=3, max_z=10):
    serial_jobs = get_job_gen(min_xy, max_xy, min_c, max_c, min_z, max_z)
    sem_size = cpu_count()
    proc_sem = BoundedSemaphore(sem_size)
    max_tested = Value('i', -1)
    next_threshhold = Value('i', threshhold_gap)

    def sync_search(search_range):
        search_c_serial(psp, *search_range)
        proc_sem.release()
        with max_tested.get_lock():
            max_tested.value = max(max_tested.value, search_range[3])
        # unnecessary; max_tested lock takes care of atomicity. included for maintainability
        with next_threshhold.get_lock():
            if max_tested.value > next_threshhold.value:
                print_write(True, "\t\t{!s}".format(next_threshhold.value))
                next_threshhold.value += threshhold_gap

    print_write(True, "\n\tStarting search for xy in [{!s},{!s}), {} c in [{!s},{!s}), and z in [{!s},{!s})\n".format(
        min_xy, max_xy, "psp" if psp else "all", min_c, max_c, min_z, max_z))
    for s_job in serial_jobs:
        proc_sem.acquire()
        p = Process(target=sync_search, args=(s_job,))
        p.start()
    wait_sem(proc_sem, sem_size)
    print_write(True, "\n\tCompleted search for xy in [{!s},{!s}), {} c in [{!s},{!s}), and z in [{!s},{!s})".format(
        min_xy, max_xy, "psp" if psp else "all", min_c, max_c, min_z, max_z))
class BoundedExecutor:
    """BoundedExecutor behaves as a ThreadPoolExecutor which will block on
    calls to submit() once the limit given as "bound" work items are queued for
    execution.

    :param bound: Integer - the maximum number of items in the work queue
    :param max_workers: Integer - the size of the thread pool
    """

    def __init__(self, bound, max_workers):
        self.executor = ThreadPoolExecutor(max_workers=max_workers)
        self.semaphore = BoundedSemaphore(bound + max_workers)

    """See concurrent.futures.Executor#submit"""
    def submit(self, fn, *args, **kwargs):
        self.semaphore.acquire()
        try:
            future = self.executor.submit(fn, *args, **kwargs)
        except:
            self.semaphore.release()
            raise
        else:
            future.add_done_callback(lambda x: self.semaphore.release())
            return future

    """See concurrent.futures.Executor#shutdown"""
    def shutdown(self, wait=True):
        self.executor.shutdown(wait)
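# A minimal usage sketch for the class above; the `task` function and the
# chosen bounds are illustrative assumptions, not part of the original source.
import time
from concurrent.futures import ThreadPoolExecutor  # dependency of BoundedExecutor

def task(n):
    time.sleep(0.1)  # stand-in for real work
    return n * n

executor = BoundedExecutor(bound=10, max_workers=4)
# submit() blocks once bound + max_workers futures are outstanding, so a fast
# producer loop cannot flood the executor's internal queue with pending items.
futures = [executor.submit(task, i) for i in range(100)]
print(sum(f.result() for f in futures))
executor.shutdown()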
def iterate_minibatches(self, generate_jobs, batch_size, buffer_size, postprocess_batch=None):
    assert buffer_size >= batch_size
    buffer_semaphore = BoundedSemaphore(buffer_size)  # jobs currently in buffer
    generate_jobs = iter(generate_jobs)
    postprocess_batch = postprocess_batch or nop

    @background(max_prefetch=-1)
    def _load_jobs_and_iterate_batch_sizes():
        """Loads jobs into queue, yields batch_size every time a generator
        can order this batch_size from the database."""
        current_batch = 0
        for task in generate_jobs:
            buffer_semaphore.acquire()
            self.add_jobs(task)
            current_batch += 1
            if current_batch == batch_size:
                yield current_batch  # you can now load additional batch_size elements
                current_batch = 0
        if current_batch != 0:
            yield current_batch  # load the remaining elements

    for allowed_batch_size in _load_jobs_and_iterate_batch_sizes():
        batch = self.get_results_batch(allowed_batch_size)
        for _ in batch:
            buffer_semaphore.release()
        yield postprocess_batch(batch)
def _add_token_loop(self, time_delta):
    """Add token every time_delta seconds."""
    while True:
        try:
            BoundedSemaphore.release(self)
        except ValueError:  # ignore if already max possible value
            pass
        sleep(time_delta)  # ignore EINTR
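# For context, the method above reads like the refill loop of a semaphore-based
# token bucket. Below is a minimal self-contained sketch of such a class using
# threading primitives; the class name and the daemon-thread wiring are
# assumptions for illustration, not the original code.
import threading
from time import sleep

class TokenBucket(threading.BoundedSemaphore):
    """acquire() consumes a token; a daemon thread restores one token every
    `time_delta` seconds, capped at the initial value by the bounded check."""

    def __init__(self, tokens, time_delta):
        super().__init__(tokens)
        refill = threading.Thread(target=self._add_token_loop,
                                  args=(time_delta,), daemon=True)
        refill.start()

    def _add_token_loop(self, time_delta):
        """Add token every time_delta seconds."""
        while True:
            try:
                threading.BoundedSemaphore.release(self)
            except ValueError:  # already at the max possible value
                pass
            sleep(time_delta)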
def __init__(self):
    # if any jobs are marked in run state when the scheduler starts,
    # replace their state with X to mark that they have been shut down
    db = DAL(config.uri, auto_import=True, migrate=False, folder=config.dbdir)
    myset = db(db.jobs.state == 'R')
    myset.update(state='X')
    db.commit()
    self.sem = BoundedSemaphore(config.np)
    self.mutex = Lock()
def __init__(self):
    # if any jobs are marked in run state when the scheduler starts,
    # replace their state with STATE_STOPPED to mark that they have been shut down
    db = DAL(config.uri, auto_import=True, migrate=False, folder=config.dbdir)
    myset = db(db.jobs.state == STATE_RUN)
    myset.update(state=STATE_STOPPED)
    db.commit()
    self.sem = BoundedSemaphore(config.np)
    self.mutex = Lock()
    # set time zone
    try:
        os.environ['TZ'] = config.time_zone
        time.tzset()
    except Exception:
        pass
def vparInit():
    # Define the pointer to the callback function
    CLBKFUNC = CFUNCTYPE(c_long, c_long, POINTER(tResults))
    clbk_func = CLBKFUNC(callback)

    # Load the multithreaded VPAR shared library
    global lib_vpar
    lib_vpar = CDLL("vparmt.so")

    # Initialize VPAR
    ret = lib_vpar.vparmtInit(clbk_func, 101, -1, 0, 0, 0, 1)
    if ret == 1:
        print("VPAR INIT OK")
    else:
        print("ERROR INITIALIZING VPAR LIB: code %u" % ret)
        sys.exit()

    global licensedCores
    licensedCores = lib_vpar.NumLicenseCores()
    print("Available cores = %u" % licensedCores)

    global vparSemaphore
    vparSemaphore = BoundedSemaphore(value=licensedCores)

    # Create the configuration for the VPAR engine methods
    global engSettings
    engSettings = tRecognitionEngineSettings()
    engineSettingsConfig(engSettings)
def main():
    termination_event = Event()
    semaphore = BoundedSemaphore(_shared.CONSUMERS_SIMULTANEOUSLY)
    allow_consumation = Event()
    generator_lock = Lock()
    workers = []
    for generator_id in range(_shared.GENERATORS_COUNT):
        args = (generator_id, termination_event, semaphore,
                allow_consumation, generator_lock)
        workers.append(Process(target=process_target(launch_generator), args=args))
        workers[-1].start()
    for reader_id in range(_shared.CONSUMERS_COUNT):
        args = (reader_id, termination_event, semaphore, allow_consumation)
        workers.append(Process(target=process_target(launch_consumer), args=args))
        workers[-1].start()
    try:
        while not termination_event.is_set():
            time.sleep(0.5)
    except KeyboardInterrupt:
        termination_event.set()
    finally:
        for worker in workers:
            worker.join()
def measure(args):
    procs = []
    sema = BoundedSemaphore(int(args.procs))
    for proc in range(args.procs):
        sema.acquire()
        p = Process(target=readTiles, args=(args, proc, sema))
        p.start()
        procs.append((p, dir))
        if args.verbosity > 1:
            print("start {}".format(proc), file=sys.stdout)
    for p in procs:
        p[0].join()
        if args.verbosity > 1:
            print("join {}".format(p), file=sys.stdout)
def test_bounded_semaphore_with_normal_value(self):
    from multiprocessing.synchronize import Semaphore as synchronize_Semaphore
    from multiprocessing import BoundedSemaphore

    Globalize.bounded_semaphore(bsmp=BoundedSemaphore())
    from multirunnable.api.manage import Running_Bounded_Semaphore
    assert isinstance(Running_Bounded_Semaphore, synchronize_Semaphore) is True, \
        "It should save instance to the target global variable *Running_Bounded_Semaphore*."
def measure(args):
    procs = []
    sema = BoundedSemaphore(int(args.procs))
    inputf = open(args.tiles, 'r')  # open() instead of the removed Python 2 file()
    for line in inputf:
        s = "curl -s 'http://35.203.177.233/fcgi-bin/iipsrv.fcgi?DeepZoom='" + args.slide + line.rstrip() + ">/dev/null"
        # print(line.rstrip())
        # os.system(s)
        sema.acquire()
        p = Process(target=readTile, args=(s, sema))
        p.start()
        procs.append((p, dir))
    for p in procs:
        p[0].join()
    inputf.close()
class SessionManager(object):
    _prepared_statement_cache = {}
    _multiprocess_lock = BoundedSemaphore(4)

    @classmethod
    def create_pool(cls, cluster, keyspace, consistency_level=None, fetch_size=None,
                    default_timeout=None, process_count=None):
        # cls.__pool = Pool(processes=process_count, initializer=cls._setup,
        #                   initargs=(cluster, keyspace, consistency_level, fetch_size, default_timeout))
        cls._setup(cluster, keyspace, consistency_level, fetch_size, default_timeout)

    @classmethod
    def _setup(cls, cluster, keyspace, consistency_level, fetch_size, default_timeout):
        cls.cluster = cluster
        with cls._multiprocess_lock:
            cls.__session = cls.cluster.connect(keyspace)
        cls.__session.row_factory = tuple_factory
        if consistency_level is not None:
            cls.__session.default_consistency_level = consistency_level
        if fetch_size is not None:
            cls.__session.default_fetch_size = fetch_size
        if default_timeout is not None:
            cls.__session.default_timeout = default_timeout
        cls._prepared_statement_cache = {}

    @classmethod
    def prepare(cls, statement):
        if statement not in cls._prepared_statement_cache:
            cls._prepared_statement_cache[statement] = cls.__session.prepare(statement)
        return cls._prepared_statement_cache[statement]

    def close_pool(self):
        self.pool.close()
        self.pool.join()

    @classmethod
    def get_query_columns(cls, table):
        # grab the column names from our metadata
        cols = cls.cluster.metadata.keyspaces[cls.__session.keyspace].tables[table].columns.keys()
        cols = map(_clean_column_name, cols)
        unneeded = ['subsite', 'node', 'sensor', 'method']
        cols = [c for c in cols if c not in unneeded]
        return cols

    @classmethod
    def execute(cls, *args, **kwargs):
        return cls.__session.execute(*args, **kwargs)

    @classmethod
    def session(cls):
        return cls.__session

    @classmethod
    def pool(cls):
        return cls.__pool
def __init__(self, maxsize=0):
    if maxsize <= 0:
        # Can raise ImportError (see issues #3770 and #23400)
        from multiprocessing.synchronize import SEM_VALUE_MAX as maxsize
    self._maxsize = maxsize
    self._reader, self._writer = connection.Pipe(duplex=False)
    self._rlock = Lock()
    self._opid = os.getpid()
    if sys.platform == 'win32':
        self._wlock = None
    else:
        self._wlock = Lock()
    self._sem = BoundedSemaphore(maxsize)
    # For use by concurrent.futures
    self._ignore_epipe = False
    self._after_fork()
    if sys.platform != 'win32':
        register_after_fork(self, Queue._after_fork)
def main(args):
    with open(args.pairs, 'r') as pfile:
        data = pfile.readlines()
    pairs_list = [l.strip('\n').split(' ') for l in data]

    # parallelize matching
    nprocs = cpu_count()
    psema = BoundedSemaphore(nprocs)
    with Pool(nprocs) as pool:
        def cb(*args):
            print(f"{args[0][0]} {args[0][1]} {args[0][2]}")
            psema.release()
            return

        for reference, probe in pairs_list:
            psema.acquire()
            pool.apply_async(match_pair,
                             (reference, probe, args.images, args.masks,
                              args.tempdir, args.cfg_path),
                             callback=cb)
def run_batch(item_list, maxthreads, dict_result=False):
    def cleanup():
        logger = logging.getLogger(__name__)
        logger.debug('cleanup processes')
        for run_item in item_list:
            if run_item.process:
                run_item.process.terminate()

    logger = logging.getLogger(__name__)
    semaphore = BoundedSemaphore(maxthreads)
    try:
        for run_item in item_list:
            semaphore.acquire(True)
            run_item.queue = Queue()
            p = SemaphoreProcess(target=run_item.target,
                                 semaphore=semaphore,
                                 args=run_item.args,
                                 queue=run_item.queue)
            run_item.process = p
            p.start()
        for run_item in item_list:
            run_item.result = run_item.queue.get()
            if isinstance(run_item.result, Exception):
                logger.critical('%s, exiting' % run_item.result)
                cleanup()
                sys.exit(42)
            run_item.process.join()
            run_item.process = None
        if dict_result:
            result = {}
            for run_item in item_list:
                result[run_item.key] = run_item.result
            return result
        else:
            return [run_item.result for run_item in item_list]
    except KeyboardInterrupt:
        cleanup()
        raise KeyboardInterrupt()
def measure(args, ranges):
    procs = []
    sema = BoundedSemaphore(int(args.procs))
    next = 0
    left = len(ranges)
    batch = (left // args.procs) + 1  # integer division; '/' would yield a float batch size
    for proc in range(args.procs):
        sema.acquire()
        counts = max(min(left, batch), 0)
        if args.verbosity > 1:
            print("{} {} {}".format(proc, next, counts))
        p = Process(target=readRanges, args=(args, ranges, next, counts, sema))
        p.start()
        procs.append((p, dir))
        next += batch
        left -= batch
    for p in procs:
        p[0].join()
        if args.verbosity > 1:
            print("join {}".format(p), file=sys.stdout)
def analyze_eula(eula):
    # Categories to analyse; these will be done in parallel
    categories = [formal.Formal, procedural.Procedural, substantive.Substantive]

    # Create a semaphore to limit the number of running processes
    running = BoundedSemaphore(int(os.getenv('analyze_max_threads', 1)))

    # We cannot return variables from threads, so instead create a managed
    # dictionary to pass objects back through
    ret_vars = Manager().dict()

    # Create a process declaration for each category in the above array
    processes = []
    for cat in categories:
        # Allocate a space in the dictionary for their return values
        ret_vars[cat.__name__.lower()] = None
        # Describe the process, giving the eula (us), the semaphore, and the return dict
        processes.append(Process(target=cat_score, args=(eula, cat, ret_vars, running)))

    # Start processes in order of the above array
    for process in processes:
        # Start process once semaphore is acquired
        process.start()

    # Join each process so we don't exit until all are done
    for process in processes:
        process.join()

    # De-parallelize dict now that we are done
    ret_vars = ret_vars.copy()

    # Calculate overall score by summing the weighted score of each category,
    # then dividing by the number of categories (i.e. a simple average)
    overall_score = int(sum(map(lambda x: x['weighted_score'], ret_vars.values())) / len(ret_vars))
    grades = ['F', 'D', 'C', 'B', 'A']
    return {
        'title': eula.title,
        'url': eula.url,
        'overall_score': overall_score,
        'overall_grade': grades[overall_score],
        'categories': ret_vars
    }
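# `cat_score` itself is not shown in this example. A minimal sketch of how such
# a worker could use the `running` semaphore to cap concurrency follows; the
# body is an assumption for illustration, not the project's actual code.
def cat_score(eula, category, ret_vars, running):
    with running:  # blocks until a slot is free; released on exit
        scorer = category(eula)  # hypothetical scoring class
        ret_vars[category.__name__.lower()] = {
            'weighted_score': scorer.score()  # hypothetical API
        }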
def main_threaded(iniconfig):
    semaphore = BoundedSemaphore(CONCURRENCY_LIMIT)
    tasks = []
    for appid in iniconfig:
        section = iniconfig[appid]
        task = Thread(target=checker, args=(section, appid, semaphore))
        tasks.append(task)
        task.start()
    try:
        for t in tasks:
            t.join()
    except KeyboardInterrupt:
        for t in tasks:
            if hasattr(t, 'terminate'):  # multiprocessing
                t.terminate()
        print 'Validation aborted.'
        sys.exit(1)
class RequestManager:
    def __init__(self, max_workers):
        self.max_workers = max_workers
        self.lock = Lock()
        self.sem = BoundedSemaphore(max_workers)
        self.last_request = Value('d', 0.0)
        self.last_restricted_request = Value('d', 0.0)

    @contextmanager
    def normal_request(self):
        with self.lock:
            self.sem.acquire()
            time.sleep(max(
                0.0,
                self.last_restricted_request.value + 0.6
                + (random.random() * 0.15) - time.time()))
        try:
            yield
        except Exception as e:
            raise e
        finally:
            self.last_request.value = time.time()
            self.sem.release()

    @contextmanager
    def restricted_request(self):
        with self.lock:
            for i in range(self.max_workers):
                self.sem.acquire()
            time.sleep(max(
                0.0,
                self.last_request.value + 0.6
                + (random.random() * 0.15) - time.time()))
        try:
            yield
        except Exception as e:
            raise e
        finally:
            self.last_request.value = time.time()
            self.last_restricted_request.value = time.time()
            for i in range(self.max_workers):
                self.sem.release()
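# A hypothetical usage sketch for RequestManager; the names below and the
# sleep stand-ins are illustrative, not from the original source.
import time as _time

manager = RequestManager(max_workers=4)

def do_request():
    # Up to max_workers of these run at once; each first waits out the
    # cooldown relative to the last restricted request.
    with manager.normal_request():
        _time.sleep(0.05)  # stand-in for the actual HTTP call

def do_restricted():
    # Drains every worker slot first, so this runs with no other request in flight.
    with manager.restricted_request():
        _time.sleep(0.05)  # stand-in for the rate-limited call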
def main(concurrency, dsn, jobs, file_input, file_output, skip_header):
    """Console script for timescale_bench."""
    exit_code = 0
    exec_sem = BoundedSemaphore(jobs)
    worker_queues = [WorkerQueue(dsn, exec_sem) for i in range(concurrency)]
    reader = csv.reader(file_input)
    if file_output:
        writer = csv.writer(file_output)
    if skip_header:
        next(reader, None)
    try:
        results = list(flush_batch(reader, worker_queues))
        if not results:
            raise RuntimeError('No input provided')
        if file_output:
            for query_result in results:
                for result in query_result.results:
                    writer.writerow(result)
        timing = dict(
            total=sum(res.duration for res in results),
            shortest=min(res.duration for res in results),
            median=statistics.median_high(res.duration for res in results),
            avg=statistics.mean(res.duration for res in results),
            longest=max(res.duration for res in results),
        )
        # Convert to timedelta for display
        timing = {key: timedelta(seconds=value) for key, value in timing.items()}
        click.echo(f"Number of queries processed: {len(results)}")
        click.echo(f"    Total processing time: {timing['total']}")
        click.echo(f"     Shortest query time: {timing['shortest']}")
        click.echo(f"       Median query time: {timing['median']}")
        click.echo(f"            Average time: {timing['avg']}")
        click.echo(f"      Longest query time: {timing['longest']}")
    except Exception as err:
        click.echo(f'Failure: {err}')
        exit_code = 1
    finally:
        return exit_code
def process(self, input_filepath, msi_loci, config):
    self.__reset()
    # Generate dictionary read counts for loci
    counts = {}
    loci = []
    for locus in msi_loci:
        counts[locus.locus()] = {}
        loci.append(locus)

    # Generate input and output queues and (mutex) semaphores for each.
    queue_out = Queue()
    queue_in = Queue()
    queue_full = BoundedSemaphore(100)
    full = Semaphore(0)
    empty = BoundedSemaphore(40)
    mutex_out = Semaphore(1)
    mutex_in = Semaphore(1)

    # Set amount of consumer threads; minimum one.
    consumer_threads = config['threads'] - 1
    if consumer_threads < 1:
        consumer_threads = 1

    # Create producer thread; currently only using a single thread
    # since I/O is more of the limiter than CPU bound processes.
    self.__producer = Process(target=self.extract_reads,
                              args=(input_filepath, msi_loci, full, empty,
                                    mutex_out, queue_in, consumer_threads))
    self.__producer.start()

    # Spawn the set amount of threads/processes
    if self.debug_output:
        tprint('Main> Generating {0} analyzer process(es).'.format(consumer_threads))
    for i in range(0, consumer_threads):
        p = Process(target=self.read_analyzer,
                    args=(queue_in, queue_out, full, empty,
                          mutex_in, mutex_out, queue_full))
        self.__consumers.append(p)
        self.__consumers[-1].start()

    # Iterate through the loci, fetching any reads and pushing them to
    # the pool of threads, collecting the output as they process it.
    query_delay = 0.050  # In seconds
    loop_counter = 0
    proc_check_interval = 100
    while not queue_out.empty() or self.has_live_threads():
        # Sleep for the set amount of time so the queue isn't constantly
        # getting hammered with queries
        time.sleep(query_delay)
        loop_counter += 1
        if loop_counter % proc_check_interval == 0:  # '==' instead of the buggy 'is' identity test
            # Time to check that the consumers didn't die
            # while the producer is still producing
            mutex_out.acquire()
            self.status_check(queue_out.qsize())
            mutex_out.release()
        while not queue_out.empty():
            # There is data on the queue to be processed; the return from the
            # queue should be a tuple with (locus, repeat_count)
            mutex_out.acquire()
            result = queue_out.get()
            locus = result[0]
            repeat_count = result[1]
            if repeat_count >= 0:
                if locus not in counts:
                    counts[locus] = {}
                if repeat_count not in counts[locus]:
                    counts[locus][repeat_count] = 0
                counts[locus][repeat_count] += 1
            mutex_out.release()
            queue_full.release()
        if not self.has_live_threads():
            # All processes should have terminated.
            if self.debug_output:
                tprint('Main> All processes complete.')
            break
    # end while loop
    return counts
import requests
import datetime
from VehicleServices import VSUtils
from VehicleServices.AppConfig import DevConfig
from multiprocessing import BoundedSemaphore
import json
from VehicleServices.VSUtils import sendMail
from requests.exceptions import ConnectionError
# Implied by the excerpt (Flask/Flask-RESTful objects are used below):
from flask import Flask, Response
from flask_restful import Api, Resource

VehicleServicesApp = Flask("VehicleServicesApp")
_api = Api(VehicleServicesApp)

MOTORBR_MAX_CONCURRENCY = 2  # Maximum number of parallel queries allowed to run.
MOTORBR_VEHICLE_URL_CONSTANT = "http://motorprod1.producao1.datacenter1:8080/MotorConsultas/consulta?matricula=1481190&cpf=09994423762&objeto=VEICULO&campo=PLACA&sistema=22&chave="
MOTORBR_TIMEOUT_SEC = 5  # Timeout in seconds for the bus when calling the vehicle query implemented here.

semaphoreMotorBR = BoundedSemaphore(value=MOTORBR_MAX_CONCURRENCY)
VSUtils.getLogger(__name__).info("*** STARTING MOTORBR QUERY APPLICATION ***")


class VehicleServices(Resource):
    def __init__(self, *args, **kwargs):
        super(VehicleServices, self).__init__(*args, **kwargs)

    # Looks up a vehicle in the engine by its license plate.
    def get(self, licensePlate):
        global semaphoreMotorBR
        if not semaphoreMotorBR.acquire(block=False):
            VSUtils.getLogger(__name__).info("*** Concurrent query limit exceeded ***")
            return Response(status=429)  # 429 Too Many Requests
        try:
            wait4MotorTimeStart = datetime.datetime.now()
class Zydra():
    def __init__(self):
        self.start_time = time.monotonic()
        self.process_lock = BoundedSemaphore(value=cpu_count())
        self.counter_lock = threading.BoundedSemaphore(value=1)
        self.banner()
        self.stop = Queue(maxsize=1)
        self.stop.put(False)
        self.count = Queue(maxsize=1)
        self.threads = []
        self.name = Queue(maxsize=1)
        self.name.put(str("a"))
        self.process_count = 0
        self.limit_process = 500
        self.shot = 5000

    def fun(self, string):
        list = []
        fer = ['-', "\\", "|", '/']
        for char in string:
            list.append(char)
        timer = 0
        pointer = 0
        fer_pointer = 0
        while timer < 20:
            list[pointer] = list[pointer].upper()
            print("\r" + self.blue("".join(str(x) for x in list) + " " + fer[fer_pointer]), end="")
            list[pointer] = list[pointer].lower()
            max_fer = len(fer) - 1
            if fer_pointer == max_fer:
                fer_pointer = -1
            max = len(list) - 1
            if pointer == max:
                pointer = -1
            pointer += 1
            fer_pointer += 1
            timer += 1
            time.sleep(0.1)
        if timer == 20:
            print("\r" + self.blue(string) + "\n", end="")
        return

    def blue(self, string):
        return colored(string, "blue", attrs=['bold'])

    def green(self, string):
        return colored(string, "green", attrs=['bold'])

    def yellow(self, string):
        return colored(string, "yellow", attrs=['bold'])

    def red(self, string):
        return colored(string, "red", attrs=['bold'])

    def bwhite(self, string):
        return colored(string, "white", attrs=['bold'])

    def white(self, string):
        return colored(string, "white")

    def detect_file_type(self, file):
        if str(file).split(".")[-1] == "rar":
            return "rar"
        elif str(file).split(".")[-1] == "zip":
            return "zip"
        elif str(file).split(".")[-1] == "pdf":
            return "pdf"
        else:
            return "text"

    def count_word(self, dict_file):
        count = 0
        with open(dict_file, "r") as wordlist:
            for line in wordlist:
                count += 1
        return count

    def count_possible_com(self, chars, min, max):
        x = min
        possible_com = 0
        while x <= max:
            possible_com += len(chars) ** x
            x += 1
        return possible_com

    def counter(self, max_words):
        self.counter_lock.acquire()
        num = self.count.get()
        # print(self.count)
        # print(num)
        if num != 0:
            self.count.put(num - 1)
        current_word = max_words - int(num) + 1
        percent = (100 * current_word) / max_words
        width = (current_word + 1) / (max_words / 42)  # 100 / 25
        bar = "\t" + self.white("Progress : [") + "#" * int(width) + " " * (42 - int(width)) \
              + "] " + self.yellow(str("%.3f" % percent) + " %")
        # time.sleep(1)
        sys.stdout.write(u"\t\u001b[1000D" + bar)
        sys.stdout.flush()
        self.counter_lock.release()

    def handling_too_many_open_files_error(self):
        if self.process_count == self.limit_process:
            for x in self.threads:
                x.join()
            self.threads = []
            self.limit_process += 500

    def search_zip_pass(self, passwords_list, compress_file, max_words):
        try:
            temp_file = self.create_temporary_copy(compress_file, passwords_list[1])
            for word in passwords_list:
                password = word.strip('\r').strip('\n')
                stop = self.stop.get()
                self.stop.put(stop)
                if stop is False:  # once the password is found, stop doing more work
                    self.counter(max_words)
                    try:
                        with zipfile.ZipFile(temp_file, "r") as zfile:
                            zfile.extractall(pwd=bytes(password, encoding='utf-8'))
                        self.stop.get()
                        self.stop.put(True)
                        time.sleep(3)
                        print("\n\t" + self.green("[+] Password Found: " + password) + "\n")
                        break
                    except Exception as e:
                        # print(e)
                        pass
                else:
                    break
            if os.path.isfile(temp_file):
                os.remove(os.path.abspath(temp_file))
            # last_process_number = int(max_words / self.shot) + (max_words % self.shot > 0)
            if str(self.last_process_number) in str(current_process().name):
                time.sleep(20)
                stop = self.stop.get()
                self.stop.put(stop)
                if stop is False:
                    print("\n\t" + self.red("[-] password not found") + "\n")
                else:
                    pass
            self.process_lock.release()
        except KeyboardInterrupt:
            self.process_lock.release()

    def create_temporary_copy(self, file, word):
        name = self.name.get()
        name2 = str(word)
        self.name.put(name2)
        directory_path = "temp_directory"
        try:
            os.mkdir(directory_path)
        except FileExistsError:
            pass
        temp_file_name = "temp" + name + "." + self.file_type
        temp_file_path = directory_path + '/' + temp_file_name  # linux path
        shutil.copy2(file, temp_file_path)
        return temp_file_path

    def delete_temporary_directory(self):
        if os.path.exists("temp_directory"):
            shutil.rmtree("temp_directory")

    def search_rar_pass(self, passwords_list, compress_file, max_words):
        try:
            temp_file = self.create_temporary_copy(compress_file, passwords_list[1])
            for word in passwords_list:
                password = word.strip('\r').strip('\n')
                stop = self.stop.get()
                self.stop.put(stop)
                if stop is False:  # once the password is found, stop doing more work
                    self.counter(max_words)
                    try:
                        with rarfile.RarFile(temp_file, "r") as rfile:
                            # print(password)  # very useful for troubleshooting
                            rfile.extractall(pwd=password)
                        self.stop.get()
                        self.stop.put(True)
                        time.sleep(3)
                        print("\n\t" + self.green("[+] Password Found: " + password + '\n'))
                        break
                    except Exception as e:
                        # print(e)
                        pass
                else:
                    break
            if os.path.isfile(temp_file):
                os.remove(os.path.abspath(temp_file))
            # last_process_number = int(max_words / 500) + (max_words % 500 > 0)
            if str(self.last_process_number) in str(current_process().name):
                time.sleep(20)
                stop = self.stop.get()
                self.stop.put(stop)
                if stop is False:
                    print("\n\t" + self.red("[-] password not found") + "\n")
                else:
                    pass
            self.process_lock.release()
        except KeyboardInterrupt:
            self.process_lock.release()

    def search_pdf_pass(self, passwords_list, file, max_words):
        try:
            temp_file = self.create_temporary_copy(file, passwords_list[1])
            for word in passwords_list:
                password = word.strip('\r').strip('\n')
                stop = self.stop.get()
                self.stop.put(stop)
                if stop is False:  # once the password is found, stop doing more work
                    self.counter(max_words)
                    # NOTE: the example source is redacted at this point ("******");
                    # the rest of the qpdf invocation, its status check, and the
                    # start of the success branch are elided.
                    proc = subprocess.Popen([
                        'qpdf', "--password="******"\n\t" + self.green("[+] Password Found: " + password))
                        print("\t" + self.blue("[*]") + self.white(" Your decrypted file is ") +
                              self.bwhite(self.decrypted_file_name) + "\n")
                        # self.end_time()
                        break
                    elif status == 2:
                        pass
                else:
                    break
            # for thread in self.threads:
            #     print(thread)
            # last_process_number = int(max_words / 500) + (max_words % 500 > 0)
            if str(self.last_process_number) in str(current_process().name):
                time.sleep(20)
                stop = self.stop.get()
                self.stop.put(stop)
                if stop is False:
                    print("\n\t" + self.red("[-] password not found") + "\n")
                else:
                    pass
            self.process_lock.release()
        except KeyboardInterrupt:
            self.process_lock.release()

    def search_shadow_pass(self, passwords_list, salt_for_crypt, crypt_pass, max_words, user):
        try:
            for word in passwords_list:
                password = word.strip('\r').strip('\n')
                stop = self.stop.get()
                self.stop.put(stop)
                if stop is False:  # once the password is found, stop doing more work
                    self.counter(max_words)
                    cryptword = crypt.crypt(password, salt_for_crypt)
                    if cryptword == crypt_pass:
                        self.stop.get()
                        self.stop.put(True)
                        time.sleep(4)
                        print("\n\t" + self.green("[+] Password Found: " + password) + "\n")
                        break
                    else:
                        pass
                else:
                    break
            # print(last_process_number)
            # print(str(current_process().name))
            if str(self.last_process_number) in str(current_process().name):
                time.sleep(20)
                stop = self.stop.get()
                self.stop.put(stop)
                if stop is False:
                    print("\n\t" + self.red("[-] password not found") + "\n")
                else:
                    pass
            self.process_lock.release()
        except KeyboardInterrupt:
            self.process_lock.release()

    def last_words_check(self, max_words, passwords_list, file):
        while True:
            if self.stop is True:
                exit(0)
            elif self.count == len(passwords_list):
                # self.count decreases
                if self.file_type == "rar":
                    self.search_rar_pass(passwords_list, file, max_words)
                if self.stop is False:
                    print("\n\t" + self.red("[-] Password not found") + "\n")
                    self.delete_temporary_directory()
                    self.end_time()
                return
            else:
                pass

    def dict_guess_password(self, dict_file, file):
        last_check = 0
        passwords_group = []
        possible_words = self.count_word(dict_file)
        self.last_process_number = int(possible_words / self.shot) + (possible_words % self.shot > 0)
        self.count.put(possible_words)
        self.file_type = self.detect_file_type(file)
        self.fun("Starting password cracking for " + file)
        print("\n " + self.blue("[*]") + self.white(" Count of possible passwords: ") +
              self.bwhite(str(possible_words)))
        if self.file_type == "text":
            file = open(file)
            for line in file.readlines():
                self.count.get()
                self.count.put(possible_words)
                crypt_pass = line.split(':')[1].strip(' ')
                if crypt_pass not in ['*', '!', '!!']:
                    user = line.split(':')[0]
                    print(" " + self.blue("[**]") + self.white(" cracking Password for: ") +
                          self.bwhite(user))
                    algorythm = crypt_pass.split('$')[1].strip(' ')
                    salt = crypt_pass.split('$')[2].strip(' ')
                    salt_for_crypt = '$' + algorythm + '$' + salt + '$'
                    with open(dict_file, "r") as wordlist:
                        for word in wordlist:
                            passwords_group.append(word)
                            last_check += 1
                            self.handling_too_many_open_files_error()
                            if (len(passwords_group) == self.shot) or (possible_words - last_check == 0):
                                passwords = passwords_group
                                passwords_group = []
                                self.process_lock.acquire()
                                stop = self.stop.get()
                                self.stop.put(stop)
                                if stop is False:
                                    t = Process(target=self.search_shadow_pass,
                                                args=(passwords, salt_for_crypt, crypt_pass,
                                                      possible_words, user))
                                    self.threads.append(t)
                                    self.process_count += 1
                                    t.start()
                                else:
                                    self.process_lock.release()
                            else:
                                continue
            for x in self.threads:
                x.join()
            self.last_process_number *= 2
            self.end_time()
        elif self.file_type == "zip":
            with open(dict_file, "r") as wordlist:
                for word in wordlist:
                    passwords_group.append(word)
                    last_check += 1
                    self.handling_too_many_open_files_error()
                    if (len(passwords_group) == self.shot) or (possible_words - last_check == 0):
                        passwords = passwords_group
                        passwords_group = []
                        self.process_lock.acquire()
                        stop = self.stop.get()
                        self.stop.put(stop)
                        if stop is False:
                            t = Process(target=self.search_zip_pass,
                                        args=(passwords, file, possible_words))
                            self.threads.append(t)
                            self.process_count += 1
                            t.start()
                        else:
                            self.process_lock.release()
                    else:
                        continue
            for x in self.threads:
                x.join()
            self.delete_temporary_directory()
            self.end_time()
        elif self.file_type == "pdf":
            self.decrypted_file_name = "decrypted_" + file.split('/')[-1]
            with open(dict_file, "r") as wordlist:
                for word in wordlist:
                    passwords_group.append(word)
                    last_check += 1
                    self.handling_too_many_open_files_error()
                    if (len(passwords_group) == self.shot) or (possible_words - last_check == 0):
                        passwords = passwords_group
                        passwords_group = []
                        self.process_lock.acquire()
                        stop = self.stop.get()
                        self.stop.put(stop)
                        if stop is False:
                            t = Process(target=self.search_pdf_pass,
                                        args=(passwords, file, possible_words))
                            self.threads.append(t)
                            self.process_count += 1
                            t.start()
                        else:
                            self.process_lock.release()
                    else:
                        continue
            for x in self.threads:
                x.join()
            self.delete_temporary_directory()
            self.end_time()
        elif self.file_type == "rar":
            with open(dict_file, "r") as wordlist:
                for word in wordlist:
                    passwords_group.append(word)
                    last_check += 1
                    self.handling_too_many_open_files_error()
                    if (len(passwords_group) == self.shot) or (possible_words - last_check == 0):
                        passwords = passwords_group
                        passwords_group = []
                        self.process_lock.acquire()
                        stop = self.stop.get()
                        self.stop.put(stop)
                        if stop is False:
                            # ok, finish all processes after finding the password
                            t = Process(target=self.search_rar_pass,
                                        args=(passwords, file, possible_words))
                            self.threads.append(t)
                            self.process_count += 1
                            t.start()
                        else:
                            self.process_lock.release()
                    else:
                        continue
            for x in self.threads:
                x.join()
            self.delete_temporary_directory()
            self.end_time()

    def bruteforce_guess_password(self, chars, min, max, file):
        last_check = 0
        passwords_group = []
        possible_com = self.count_possible_com(chars, int(min), int(max))
        self.last_process_number = int(possible_com / self.shot) + (possible_com % self.shot > 0)
        self.count.put(possible_com)
        self.file_type = self.detect_file_type(file)
        self.fun("Starting password cracking for " + file)
        print("\n " + self.blue("[*]") + self.white(" Count of possible passwords: ") +
              self.bwhite(str(possible_com)))
        if self.file_type == "text":
            file = open(file)
            for line in file.readlines():
                self.count.get()
                self.count.put(possible_com)
                crypt_pass = line.split(':')[1].strip(' ')
                if crypt_pass not in ['*', '!', '!!']:
                    user = line.split(':')[0]
                    print(" " + self.blue("[**]") + self.white(" cracking Password for: ") +
                          self.bwhite(user))
                    algorythm = crypt_pass.split('$')[1].strip(' ')
                    salt = crypt_pass.split('$')[2].strip(' ')
                    salt_for_crypt = '$' + algorythm + '$' + salt + '$'
                    for password_length in range(int(min), int(max) + 1):
                        for guess in itertools.product(chars, repeat=password_length):
                            guess = ''.join(guess)
                            passwords_group.append(guess)
                            last_check += 1
                            self.handling_too_many_open_files_error()
                            if (len(passwords_group) == self.shot) or (possible_com - last_check == 0):
                                passwords = passwords_group
                                passwords_group = []
                                self.process_lock.acquire()
                                stop = self.stop.get()
                                self.stop.put(stop)
                                if stop is False:
                                    t = Process(target=self.search_shadow_pass,
                                                args=(passwords, salt_for_crypt, crypt_pass,
                                                      possible_com, user))
                                    self.threads.append(t)
                                    self.process_count += 1
                                    t.start()
                                else:
                                    self.process_lock.release()
                            else:
                                continue
            for x in self.threads:
                x.join()
            self.last_process_number *= 2
            self.end_time()
        elif self.file_type == "zip":
            for password_length in range(int(min), int(max) + 1):
                for guess in itertools.product(chars, repeat=password_length):
                    guess = ''.join(guess)
                    passwords_group.append(guess)
                    last_check += 1
                    self.handling_too_many_open_files_error()
                    if (len(passwords_group) == self.shot) or (possible_com - last_check == 0):
                        passwords = passwords_group
                        passwords_group = []
                        self.process_lock.acquire()
                        stop = self.stop.get()
                        self.stop.put(stop)
                        if stop is False:
                            t = Process(target=self.search_zip_pass,
                                        args=(passwords, file, possible_com))
                            self.threads.append(t)
                            self.process_count += 1
                            t.start()
                        else:
                            self.process_lock.release()
                    else:
                        continue
            for x in self.threads:
                x.join()
            self.delete_temporary_directory()
            self.end_time()
        elif self.file_type == "pdf":
            self.decrypted_file_name = "decrypted_" + file.split('/')[-1]
            for password_length in range(int(min), int(max) + 1):
                for guess in itertools.product(chars, repeat=password_length):
                    guess = ''.join(guess)
                    passwords_group.append(guess)
                    last_check += 1
                    self.handling_too_many_open_files_error()
                    if (len(passwords_group) == self.shot) or (possible_com - last_check == 0):
                        passwords = passwords_group
                        passwords_group = []
                        self.process_lock.acquire()
                        stop = self.stop.get()
                        self.stop.put(stop)
                        if stop is False:
                            t = Process(target=self.search_pdf_pass,
                                        args=(passwords, file, possible_com))
                            self.threads.append(t)
                            self.process_count += 1
                            t.start()
                        else:
                            self.process_lock.release()
                    else:
                        continue
            for x in self.threads:
                x.join()
            self.delete_temporary_directory()
            self.end_time()
        elif self.file_type == "rar":
            for password_length in range(int(min), int(max) + 1):
                for guess in itertools.product(chars, repeat=password_length):
                    guess = ''.join(guess)
                    passwords_group.append(guess)
                    last_check += 1
                    self.handling_too_many_open_files_error()
                    if (len(passwords_group) == self.shot) or (possible_com - last_check == 0):
                        passwords = passwords_group
                        passwords_group = []
                        self.process_lock.acquire()
                        stop = self.stop.get()
                        self.stop.put(stop)
                        if stop is False:
                            # ok, finish all processes after finding the password
                            t = Process(target=self.search_rar_pass,
                                        args=(passwords, file, possible_com))
                            self.threads.append(t)
                            self.process_count += 1
                            t.start()
                        else:
                            self.process_lock.release()
                    else:
                        continue
            for x in self.threads:
                x.join()
            self.delete_temporary_directory()
            self.end_time()

    def make_chars(self, char_type):
        chartype_list = char_type.split(",")
        chars = ""
        for chartype in chartype_list:
            if chartype == "lowercase":
                chars += string.ascii_lowercase
            elif chartype == "uppercase":
                chars += string.ascii_uppercase
            elif chartype == "letters":
                chars += string.ascii_letters
            elif chartype == "digits":
                chars += string.digits
            elif chartype == "symbols":
                chars += string.punctuation
            elif chartype == "space":
                chars += " "
            else:
                return False
        return chars

    def banner(self):
        term.clear()
        term.pos(1, 1)
        # check if font "epic" exists on this system
        # sudo wget http://www.figlet.org/fonts/epic.flf -O /usr/share/figlet/epic.flf
        bannerfont = "epic" if os.path.exists('/usr/share/figlet/epic.flf') else "banner"
        banner = pyfiglet.figlet_format("ZYDRA", font=bannerfont).replace("\n", "\n\t\t", 7)
        cprint("\r\n\t" + "@" * 61, "blue", end="")
        cprint("\n\t\t" + banner + "\t\tAuthor : Hamed Hosseini", "blue", attrs=['bold'])
        cprint("\t" + "@" * 61 + "\n", "blue")

    def end_time(self):
        self.stop = True
        end_time_show = time.asctime()
        end_time = time.monotonic()
        execution_time = (timedelta(seconds=end_time - self.start_time))
        print(self.blue("End time ==> ") + self.white(end_time_show))
        print(self.blue("Execution time ==> ") + self.white(str(execution_time)) + "\n")
        term.saveCursor()
        term.pos(7, 15)
        term.writeLine("ok", term.green, term.blink)
        term.restoreCursor()
        exit(0)

    def main(self):
        start_time_show = time.asctime()
        usage = "%prog [options] [args]" \
                "\n\nDictionary Mode:" \
                "\n  %prog -f <file> -d <wordlist>" \
                "\n\nBrute force Mode:" \
                "\n  %prog -f <file> -b <char_type> -m <min_length> -x <max_length>" \
                "\n\n  Available char_type:" \
                "\n\t<lowercase>  The lowercase letters abcdefghijklmnopqrstuvwxyz" \
                "\n\t<uppercase>  The uppercase letters ABCDEFGHIJKLMNOPQRSTUVWXYZ" \
                "\n\t<letters>    The concatenation of the lowercase and uppercase" \
                "\n\t<digits>     numbers 0123456789" \
                "\n\t<symbols>    punctuation characters !#$%&'()*+,-./:;<=>?@[\\]^_`{|}~'" + '"' \
                "\n\t<space>      space character" \
                "\n  You can select multiple character types." \
                "\n\tExample: %prog -f <file> -b <space,digits> -m 1 -x 8"
        parser = optparse.OptionParser(usage)
        parser.add_option("-d", dest="dictfile", type='string', help="Specifies dictionary file")
        parser.add_option("-f", dest="file", type='string', help="Specifies the file")
        parser.add_option("-b", dest="chartype", type='string', help="Specifies the character type")
        parser.add_option("-m", dest="minlength", type='string', help="Specifies minimum length of password")
        parser.add_option("-x", dest="maxlength", type='string', help="Specifies maximum length of password")
        (options, args) = parser.parse_args()
        try:
            if options.file:
                if os.path.isfile(options.file):
                    file = os.path.abspath(options.file)
                    if options.dictfile:
                        if os.path.isfile(options.dictfile):
                            dictfile = os.path.abspath(options.dictfile)
                            print(self.blue("Start time ==> ") + self.white(start_time_show) + "\n")
                            self.dict_guess_password(dictfile, file)
                        else:
                            parser.error(" " + options.dictfile + " dictionary file does not exist")
                            exit(0)
                    elif options.chartype:
                        chars = self.make_chars(options.chartype)
                        if chars is False:
                            parser.error(" " + options.chartype +
                                         " character type is not valid, Use --help for more info")
                        if options.minlength is None:
                            parser.error(" Enter minimum length of password")
                            exit(0)
                        if options.maxlength is None:
                            parser.error(" Enter maximum length of password")
                            exit(0)
                        if options.minlength > options.maxlength:
                            parser.error(" Min and Max must be numbers and Min must be \nless than Max or be the same"
                                         ", Use --help for more info")
                            exit(0)
                        else:
                            print(self.blue("Start time ==> ") + self.white(start_time_show) + "\n")
                            self.bruteforce_guess_password(chars, options.minlength,
                                                           options.maxlength, file)
                    else:
                        parser.error(" Choose a wordlist or bruteforce method, Use --help for more info")
                        exit(0)
                else:
                    parser.error("" + options.file + " file does not exist")
                    exit(0)
            else:
                parser.error(" Choose a file, Use --help for more info")
                exit(0)
        except KeyboardInterrupt:
            time.sleep(1)
            self.delete_temporary_directory()
            print(self.red("\n\n [-] Detected CTRL+C") + self.white("\n closing app...\n Finish\n"))
            # self.end_time()
            exit(0)
threads = "1" # Print header print "Concuerror's Testsuite (THREADS=%d)\n" % int(threads) print "%-8s %-63s %s" % \ ("Suite", "Module, Test (' '=Module), Bound (' '=inf), DPOR (' '=optimal)", "Result") print "-------------------------------------------------------------------------------" # Create share integers to count tests and # a lock to protect printings lock = Lock() total_tests = Value(c_int, 0, lock=False) total_failed = Value(c_int, 0, lock=False) sema = BoundedSemaphore(int(threads)) sema1 = BoundedSemaphore(int(threads)) # For every test do procT = [] for test in tests: p = Process(target=runTest, args=(test,)) procT.append(p) sema1.acquire() p.start() # Wait for p in procT: p.join() # Print overview print "\nOVERALL SUMMARY for test run" print " %d total tests, which contained" % len(tests)
    target=testcondput, args=(notempty, l, 1), name='Q1')
thread1.daemon = True
thread1.start()

thread2 = threading.Thread(
    target=testcondput, args=(notempty, l, 2), name='Q2')
thread2.daemon = True
thread2.start()

sem = BoundedSemaphore(2)
if sem.acquire():
    thread1 = threading.Thread(
        target=bufftopipe, args=(mybuff, writer), name='Q1')
    thread1.daemon = True
    thread1.start()
    logging.info(thread1.name + ' start!')
else:
    logging.info('thread full!')

if sem.acquire():
    thread2 = threading.Thread(
        target=bufftopipe,
threads = "4" # Print header print "Concuerror's Testsuite (%d threads)\n" % int(threads) print "%-10s %-20s %-50s %s" % \ ("Suite", "Module", "(Test, Bound, DPOR)", "Result") print "---------------------------------------------" + \ "---------------------------------------------" # Create share integers to count tests and # a lock to protect printings lock = Lock() total_tests = Value(c_int, 0, lock=False) total_failed = Value(c_int, 0, lock=False) sema = BoundedSemaphore(int(threads)) # For every test do procT = [] for test in tests: p = Process(target=runTest, args=(test, )) p.start() procT.append(p) # Wait for p in procT: p.join() # Print overview print "\nOVERALL SUMMARY for test run" print " %d total tests, which gave rise to" % len(tests) print " %d test cases, of which" % total_tests.value
import subprocess
import os
import sys
from multiprocessing import BoundedSemaphore

# To share 'semaphore' among multiple workers on gunicorn, use the '--preload' option
semaphore = BoundedSemaphore(int(os.environ.get('APP_WEBPACK_LIMIT', '2')))


class _WPManager():
    """ Used in 'WebpackManager'. """

    def __init__(self):
        self.semaphore = semaphore

    def run(self, folder, instance_path, fname):
        if not self.semaphore.acquire(False):
            return False
        # Parent process: return 'True' immediately if fork succeeded
        # Child process : executes 'webpack', releases semaphore and exits
        try:
            pid = os.fork()
            if pid == 0:
                # Child
                (cwd, cmd) = self.gen_command(instance_path)
                my_env = self.gen_exec_env(folder, fname)
                proc = subprocess.Popen(cmd, env=my_env, cwd=cwd)
                outs, errs = proc.communicate()
            else:
#!/usr/bin/env python3
# -*- coding: utf-8 -*-

import math, os, sys, random
from multiprocessing import Process, BoundedSemaphore, Value, Array

INBUF = []
OUTBUF = None
sem = BoundedSemaphore(8)


def f(x, N):
    return x * N - x * (x - 1) // 2


def single_test(INBUF, OUTBUF, INDEX):
    sem.acquire()
    result = 0
    N, M, data = INBUF[INDEX]
    enter = {}
    leave = {}
    events = set()
    cost = 0
    for d in data:
        cost += d[2] * f(d[1] - d[0], N)
        cost %= 1000002013
        if d[0] not in events:
            events.add(d[0])
        if d[0] not in enter:
            enter[d[0]] = d[2]
def non_critic_section():
    p = current_process()
    for i in range(N_non_critic):
        print p.name, "in non critic section", "(%i/%i)" % (i, N_non_critic)
        delay()


def critic_section():
    p = current_process()
    for i in range(N_critic):
        print p.name, "in CRITIC section", "(%i/%i)" % (i, N_critic - 1)
        delay()


def task(semaphore):
    non_critic_section()
    semaphore.acquire()
    critic_section()
    semaphore.release()


if __name__ == '__main__':
    names = ["Ana", "Eva", "Pi", "Pam", "Pum"]
    jobs = []
    K = 2
    semaphore = BoundedSemaphore(K)
    for x in names:
        jobs.append(Process(target=task, name=x, args=(semaphore, )))
    for p in jobs:
        p.start()
from multiprocessing import BoundedSemaphore

if __name__ == '__main__':
    bsem = BoundedSemaphore(3)
    bsem.acquire()
    bsem.release()
    bsem.release()  # does not raise an exception on Mac OS
    """
    Traceback (most recent call last):
      File "D:/code/picpython/pp_046_多进程同步之BoundedSemaphore.py", line 8, in <module>
        bsem.release()
    ValueError: semaphore or lock released too many times
    """
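# For contrast, a plain multiprocessing.Semaphore performs no upper-bound
# check: an extra release simply increments the counter past its initial
# value. Minimal sketch for illustration:
from multiprocessing import Semaphore

if __name__ == '__main__':
    sem = Semaphore(3)
    sem.acquire()
    sem.release()
    sem.release()  # no ValueError: the counter is now 4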
    tomar.acquire()
    print current_process().name, "removing from store",
    almacenar.acquire()
    dato = almacen[0]
    indice.value = indice.value - 1
    delay()
    for i in range(indice.value):
        almacen[i] = almacen[i + 1]
    almacenar.release()
    poner.release()
    print current_process().name, "consuming", dato


if __name__ == "__main__":
    poner = BoundedSemaphore(K)
    tomar = BoundedSemaphore(K)
    for i in range(K):
        tomar.acquire()
    almacenar = Lock()
    almacen = Array('i', K)
    indice = Value('i', 0)
    print "initial store", almacen[:], "index", indice.value
    productor = Process(target=p, name="productor",
                        args=(almacen, indice, poner, tomar, almacenar))
    consumidor = Process(target=c, name="consumidor",
                         args=(almacen, indice, poner, tomar, almacenar))
    productor.start()
        sense.show_message(humidity, scroll_speed=0.1, text_colour=[100, 150, 150])
        num = 1
    elif (event.direction[:1] == "m" and num == 0):
        sense.show_message("Bye", scroll_speed=0.1, text_colour=[100, 150, 150])
        num = 1
        isRunning = False
        t1.terminate()
        semaphore.release()
    elif (num == 1):
        num = 0
        semaphore.release()
    time.sleep(1)
    sense.clear()
    c = getMatrix()


sense = SenseHat()
sense.clear()
sense.set_rotation(180)
sense.low_light = True
semaphore = BoundedSemaphore(value=1)
t1 = Process(name="getColor", target=getColor)
t2 = Process(name="main", target=myMainLogic)
# t1.daemon = True
# t2.daemon = True
t1.start()
t2.start()
from structlog import get_logger
import cassandra.cluster
import cassandra.auth
from cassandra.query import BatchStatement, tuple_factory
from multiprocessing import BoundedSemaphore
from data.cassandra_wrapper.model import FIELDS_Quote, FIELDS_Trades, FIELDS_Trades_min, \
    FIELDS_Trades_over_under, FIELDS_Trades_over_under_basic
from common import singleton, get_config, process_singleton

MAX_PARALLEL_QUERIES = 256
QUOTE_SAMPLINGS = ('raw', 'sec', 'sec_shift', 'min', 'hr')
MAX_BATCH_SIZE = 10

_query_parallel_sema = BoundedSemaphore(MAX_PARALLEL_QUERIES)
_cassandra_enabled = True


@process_singleton
def get_cassandra_session():
    global _cassandra_enabled
    config = get_config()
    hostname = config.get('cassandra', 'hostname')
    username = config.get('cassandra', 'username')
    password = config.get('cassandra', 'password')
    keyspace = config.get('cassandra', 'keyspace')