def __init__(self, dataname_tuples, pdgIDs, nWorkers, num_loaders, filters=[]):
    self.dataname_tuples = sorted(dataname_tuples)
    self.nClasses = len(dataname_tuples[0])
    self.total_files = len(dataname_tuples)  # per class
    self.num_per_file = len(dataname_tuples) * [0]
    self.num_loaders = num_loaders
    self.lock = RLock()
    self.fileInMemory = Value('i', 0, lock=self.lock)
    self.fileInMemoryFirstIndex = Value('i', 0, lock=self.lock)
    self.fileInMemoryLastIndex = Value('i', -1, lock=self.lock)
    self.mem_index = Value('i', 1)  # either 0 or 1; used for memory management
    self.loadNext = Event()
    self.loadFile = Event()
    self.load_barrier = Barrier(self.num_loaders + 1)
    self.batch_barrier = Barrier(nWorkers - (self.num_loaders + 1))
    self.worker_files = [
        RawArray(ctypes.c_char, len(dataname_tuples[0][0]) + 50)
        for _ in range(self.num_loaders)
    ]
    self.data = {}
    ###########################################
    # Prepare memory to share with the workers:
    # take a sample file, read its keys, and over-allocate for both classes.
    # If the user runs into memory problems, use fewer num_loaders.
    with h5py.File(dataname_tuples[0][0]) as sample:
        for key in sample.keys():
            old_shape = sample[key].shape
            size = self.nClasses * self.num_loaders
            self.new_shape = list(old_shape)
            for dim in old_shape:
                size *= dim
            self.new_shape[0] = self.nClasses * self.num_loaders * old_shape[0]
            buff = RawArray(ctypes.c_float, size)  # prepare memory for num_loaders
            self.data[key] = np.frombuffer(buff, dtype=np.float32).reshape(
                self.new_shape)  # map a numpy array onto the buffer
    classID_buff = RawArray(ctypes.c_int,
                            (2 * self.nClasses * self.num_loaders * 200))
    self.data['classID'] = np.frombuffer(
        classID_buff,
        dtype=np.int)  # .reshape(self.nClasses*self.num_loaders*200)
    ###########################################
    self.pdgIDs = {}
    self.filters = filters
    for i, ID in enumerate(pdgIDs):
        self.pdgIDs[ID] = i
    self.countEvents()
def start_fetch(config):
    """Launch workers and the consumer.

    * Workers: one per URL. Each worker fetches a URL and returns the fetch time.
    * Consumer: just one. It reads the results returned by the workers and writes
      them to CSV files.
    """
    # Create queues
    result_queue = Queue()  # for results
    stop_process = Value('i', 0)  # shared integer flag

    # Start fetching all URLs
    for url_config in config.get('urls'):
        # Launch workers: processes that fetch a website and push the result into result_queue
        Process(target=worker,
                args=(stop_process, result_queue, config, url_config)).start()

    # Launch consumer: process that writes results from result_queue to CSV files
    consumer_process = Process(target=consumer,
                               args=(stop_process, result_queue, config))
    consumer_process.start()

    # Run forever
    try:
        consumer_process.join()
        # while True:
        #     time.sleep(0.5)
    except KeyboardInterrupt:
        pass
    finally:
        stop_process.value = 1
def fit_regression_models(expression, expression_indices):
    pred_expression = expression[:, (0, 1, 2, 3, 4, 5)]
    resp_expression = expression[:, (0, 1, 2, 3, 4, 5)]
    shared_index = Value('i', 0)
    pids = []
    with ThreadSafeFile("output_noshift.txt", "w") as ofp:
        for p_index in xrange(NTHREADS):
            pid = os.fork()
            if pid == 0:
                while True:
                    with shared_index.get_lock():
                        i = shared_index.value
                        if i >= len(expression):
                            break
                        shared_index.value += 1
                    alpha, nonzero_coefs = estimate_covariates(
                        pred_expression, resp_expression, i)
                    output_str = "{}\t{}\t{}\n".format(
                        expression_indices[i], alpha,
                        "\t".join(str(expression_indices[x])
                                  for x in nonzero_coefs))
                    print output_str,
                    ofp.write(output_str)
                os._exit(0)  # child exits here (os._exit, not sys._exit)
            else:
                pids.append(pid)
        try:
            for pid in pids:
                os.waitpid(pid, 0)
        except:
            for pid in pids:
                os.kill(pid, signal.SIGTERM)
            raise
def __init__(self,
             handle_events=False,
             event_window=(0.0, 10000.0),
             position_offset=(0, 0),
             orientation=PixelOrientation.Up,
             input_queue=None,
             create_output=True,
             num_outputs=1,
             shared_output=None):
    BasePipelineObject.__init__(self,
                                PacketProcessor.__name__,
                                input_queue=input_queue,
                                create_output=create_output,
                                num_outputs=num_outputs,
                                shared_output=shared_output)
    self.clearBuffers()
    self._orientation = orientation
    self._x_offset, self._y_offset = position_offset
    self._trigger_counter = 0
    self._handle_events = handle_events
    min_window, max_window = event_window
    self._min_event_window = Value('d', min_window)
    self._max_event_window = Value('d', max_window)
def add_export(self, export_range, export_dir):
    if system() == 'Darwin':
        set_start_method('spawn')
    logger.debug("Adding new video export process.")
    should_terminate = Value(c_bool, False)
    frames_to_export = Value(c_int, 0)
    current_frame = Value(c_int, 0)

    rec_dir = self.g_pool.rec_dir
    user_dir = self.g_pool.user_dir
    start_frame = export_range.start
    end_frame = export_range.stop + 1  # end_frame is exclusive
    frames_to_export.value = end_frame - start_frame

    # Here we make clones of every plugin that supports it,
    # so it runs with the current config when we launch the exporter.
    plugins = self.g_pool.plugins.get_initializers()

    out_file_path = verify_out_file_path(self.rec_name, export_dir)
    process = Export_Process(
        target=export,
        args=(should_terminate, frames_to_export, current_frame, rec_dir,
              user_dir, self.g_pool.min_data_confidence, start_frame,
              end_frame, plugins, out_file_path))
    self.new_export = process
def __init__(self):
    self.array = [Array('c', MyQueue.QUEUE_BLOCK_SIZE, lock=False)
                  for q in range(MyQueue.QUEUE_SIZE)]
    self.name = Array('c', MyQueue.MAX_QUEUE_NAME, lock=False)
    self.send_pos = Value('i', -1, lock=False)
    self.recv_pos = Value('i', -1, lock=False)
    self.occupied_flag = Value('i', -1, lock=False)
    self.slock = multiprocessing.Lock()
    self.rlock = multiprocessing.Lock()
def __init__(self, buffer_size, dataset_size):
    self.buffer_size = buffer_size
    self.dataset_size = dataset_size
    self.put_index = Value('i', 0)
    self.get_index = Value('i', 0)
    self.put_lock = mp.Lock()
    self.cdatasets = Array('d', [0.0] * self.buffer_size * self.dataset_size)
    self.cbuffer = self.cdatasets._obj._wrapper
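# The last line above reaches into private attributes (_obj, _wrapper) of the
# synchronized Array. A minimal, hedged alternative sketch, assuming the caller
# ultimately wants a NumPy view of the shared buffer rather than the raw wrapper:
import multiprocessing as mp
import numpy as np

buf = mp.Array('d', 8)                                  # lock-protected shared doubles
view = np.frombuffer(buf.get_obj(), dtype=np.float64)   # NumPy view sharing the same memory
view[0] = 3.14                                          # also visible as buf[0]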
def __init__(self, maxlen: int = 100, timeframe: float = 10.0):
    assert maxlen > 0
    self.__lock = RLock()
    self.__timestamps = Array(Cell, [(0.0, 0.0)] * maxlen, lock=self.__lock)
    self.__index = Value('i', 0, lock=self.__lock)
    self.__start = Value('i', 0, lock=self.__lock)
    self.__length = Value('i', 0, lock=self.__lock)
    self.__maxlen = maxlen
    self.__timeframe = timeframe
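# Cell is not defined in this snippet. A plausible sketch, assuming each cell is a
# pair of doubles to match the [(0.0, 0.0)] * maxlen initializer above; the field
# names are hypothetical.
from ctypes import Structure, c_double

class Cell(Structure):
    _fields_ = [("timestamp", c_double), ("value", c_double)]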
class Frame(object):
    def __init__(self, width, height, channels, array_type_code):
        self.__lock = RLock()
        self.__header = Value(Header, width, height, channels, 0,
                              lock=self.__lock)
        self.__image = Array(array_type_code,
                             self.__header.width * self.__header.height * channels,
                             lock=self.__lock)
        self.__latch = StateLatch(State.READY, self.__lock)

    def copy(self, dst):
        memmove(addressof(dst.image.get_obj()),
                addressof(self.__image.get_obj()),
                sizeof(self.__image.get_obj()))
        memmove(addressof(dst.header.get_obj()),
                addressof(self.__header.get_obj()),
                sizeof(self.__header.get_obj()))

    def clear(self):
        self.__header.epoch = 0
        memset(addressof(self.__image.get_obj()), 0,
               sizeof(self.__image.get_obj()))
        memset(addressof(self.__header.detections), 0,
               sizeof(self.__header.detections))

    @property
    def lock(self):
        return self.__lock

    @property
    def header(self):
        return self.__header

    @property
    def image(self):
        return self.__image

    @property
    def latch(self):
        return self.__latch

    def get_numpy_image(self, dtype=None):
        """Get numpy image from buffer."""
        image_shape = (self.header.height, self.header.width,
                       self.header.channels)
        image_np = frombuffer(self.image.get_obj(), dtype).reshape(image_shape)
        return image_shape, image_np
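# Header is not shown in this snippet. A hypothetical ctypes layout consistent with
# the attributes used above (width, height, channels, an epoch counter initialised
# to 0, and a detections block that clear() zeroes); the detections capacity is a
# guess for illustration only.
from ctypes import Structure, c_int, c_float

class Header(Structure):
    _fields_ = [
        ("width", c_int),
        ("height", c_int),
        ("channels", c_int),
        ("epoch", c_int),
        ("detections", c_float * 64),  # placeholder size, assumption only
    ]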
def init_marker_cacher(self):
    if system() == 'Darwin':
        forking_enable(0)
    from marker_detector_cacher import fill_cache
    visited_list = [False if x == False else True for x in self.cache]
    video_file_path = self.g_pool.capture.source_path
    timestamps = self.g_pool.capture.timestamps
    self.cache_queue = Queue()
    self.cacher_seek_idx = Value('i', 0)
    self.cacher_run = Value(c_bool, True)
    self.cacher = Process(target=fill_cache,
                          args=(visited_list, video_file_path, timestamps,
                                self.cache_queue, self.cacher_seek_idx,
                                self.cacher_run,
                                self.min_marker_perimeter_cacher))
    self.cacher.start()
def __init__(self, width, height, channels, array_type_code):
    self.__lock = RLock()
    self.__header = Value(Header, width, height, channels, 0, lock=self.__lock)
    self.__image = Array(array_type_code,
                         self.__header.width * self.__header.height * channels,
                         lock=self.__lock)
    self.__latch = StateLatch(State.READY, self.__lock)
def run_multiprocess(start_date: Union[str, None] = None,
                     end_date: Union[str, None] = None):
    """Main function: orchestrate the file downloader process and the file analysis processes.

    Keyword Arguments:
        start_date {str, None} -- start date as a string; if None it is set to utcnow minus 24 hours (default: {None})
        end_date {str, None} -- end date as a string; if None the function returns only one URL for the start date (default: {None})
    """
    # get urls to download
    urls = parse_dates(start_date, end_date)

    with Manager() as manager:
        # this queue will pass names of downloaded files from the download process
        # to the file analysis process
        queue = manager.Queue()
        # fill the queue with gzip files that have already been downloaded from tmp
        fill_queue_from_tmp(queue)
        # the downloader sets this value to True so that the file analyzer
        # knows that there will be no more filenames added to the queue
        downloads_done = Value('b', False)
        # flag that kills all processes should one fail
        process_killswitch = Value('b', False)
        # set up the file download process
        download_process = Process(target=async_download,
                                   args=(urls, queue, downloads_done,
                                         DEFAULT_NUM_DOWNLOADERS,
                                         process_killswitch))
        # set up the file analysis processes
        fileread_processes = [
            Process(target=analyze_from_queue,
                    args=(queue, downloads_done, process_killswitch))
            for _ in range(DEFAULT_NUM_FILE_PROCESSORS)
        ]
        # start the processes
        download_process.start()
        for fp in fileread_processes:
            fp.start()
        # wait for the processes to finish
        download_process.join()
        for fp in fileread_processes:
            fp.join()
def spawn_processes(args):
    p_lock = mp.Lock()
    num_files = Value("i", 0, lock=False)
    idx = Value("i", -1, lock=False)
    stars = Value("i", int(1e6), lock=False)
    pool = [
        mp.Process(target=spawn_threads,
                   args=(p_lock, num_files, idx, stars, args))
        for _ in range(args["num_processes"])
    ]
    for p in pool:
        p.start()
    for p in pool:
        p.join()
def __init__(self, dataname_tuples, pdgIDs, filters=[]):
    self.dataname_tuples = sorted(dataname_tuples)
    self.nClasses = len(dataname_tuples[0])
    self.total_files = len(dataname_tuples)  # per class
    self.num_per_file = len(dataname_tuples) * [0]
    self.fileInMemory = Value('i', -1)
    self.fileInMemoryFirstIndex = Value('i', 0)
    self.fileInMemoryLastIndex = Value('i', -1)
    self.loadNext = Value('i', 1)
    self.data = {}
    self.pdgIDs = {}
    self.filters = filters
    for i, ID in enumerate(pdgIDs):
        self.pdgIDs[ID] = i
    self.countEvents()
def __init__(
    self,
    name,
    input_queue=None,
    create_output=True,
    num_outputs=1,
    shared_output=None,
    propogate_input=True,
):
    ProcessLogger.__init__(self, name)
    multiprocessing.Process.__init__(self)
    self.input_queue = input_queue
    self.output_queue = []
    self._propgate_input = propogate_input
    if shared_output is not None:
        self.debug("Queue is shared")
        if type(shared_output) is list:
            self.debug("Queue is a list")
            self.output_queue.extend(shared_output)
        else:
            self.output_queue.append(shared_output)
    elif create_output:
        self.debug("Creating Queue")
        for x in range(num_outputs):
            self.output_queue.append(Queue())

    self._enable = Value("I", 1)
def __init__(self, n, mass, RxInit, RyInit, VxInit, VyInit, AxInit, AyInit,
             interactions):
    self.N = n
    self.M = Value('d', mass, lock=False)
    self.R = Array('d', 3 * (self.N + 1))
    self.V = Array('d', 3 * (self.N + 1))
    self.A = Array('d', 3 * (self.N + 1))

    arrR = np.frombuffer(self.R.get_obj())  # mp_arr and arr share the same memory
    self.bR = arrR.reshape((3, self.N + 1))  # b and arr share the same memory
    self.bR[0, 0] = RxInit
    self.bR[1, 0] = RyInit

    arrV = np.frombuffer(self.V.get_obj())
    self.bV = arrV.reshape((3, self.N + 1))
    self.bV[0, 0] = VxInit
    self.bV[1, 0] = VyInit

    arrA = np.frombuffer(self.A.get_obj())
    self.bA = arrA.reshape((3, self.N + 1))
    self.bA[0, 0] = AxInit
    self.bA[1, 0] = AyInit

    # self.R = np.zeros(shape=(3, self.N + 1))
    # self.V = np.zeros(shape=(3, self.N + 1))
    # self.A = np.zeros(shape=(3, self.N + 1))
    # self.R[0, 0] = RxInit
    # self.R[1, 0] = RyInit
    # self.V[0, 0] = VxInit
    # self.V[1, 0] = VyInit
    # self.A[0, 0] = AxInit
    # self.A[1, 0] = AyInit

    self.Interactions = interactions
def __init__(self, num_workers, state_transforms, env_factory, *env_args):
    self.manager: Manager = Manager()
    self.unique_set = self.manager.dict()
    self.worker_stat = Value(WorkerStat, 0, 0, 0, 0, 0, 0)
    self.vector_stat = VectorStat()
    self.remotes, work_remotes = zip(*[Pipe() for _ in range(num_workers)])
    self.transforms = state_transforms
    self.ps = []
    for (work_remote, remote) in zip(work_remotes, self.remotes):
        self.ps.append(
            Process(target=worker,
                    args=(work_remote, remote, env_factory, self.worker_stat,
                          self.unique_set)))
    for p in self.ps:
        p.start()
    for remote in work_remotes:
        remote.close()
    for remote in self.remotes:
        remote.send(('init', env_args))
    self.waiting = False
    self.closed = False
    self.remotes[0].send(('get_spaces', None))
    self.observation_space, self.action_space = self.remotes[0].recv()
    # do not hang in case of error
    self._cleaner = _ParallelEnvironmentCleaner(self)
    self.dtype = torch.float
def __init__(self, test, split, proc_num, max_utilization, cache=None,
             id_prefix=()):
    """
    Initialize an AbstractParallelDD object.

    :param test: A callable tester object.
    :param split: Splitter method to break a configuration up to n parts.
    :param proc_num: The level of parallelization.
    :param max_utilization: The maximum CPU utilization accepted.
    :param cache: Cache object to use.
    :param id_prefix: Tuple to prepend to config IDs during tests.
    """
    cache = cache or shared_cache_decorator(ConfigCache)()
    AbstractDD.__init__(self, test, split, cache=cache, id_prefix=id_prefix)

    self._proc_num = proc_num
    self._max_utilization = max_utilization
    self._fail_index = Value('i', -1, lock=False)
def __init__(self):
    self.data_dir = ''
    self.name = ''
    self.logs = {}
    self.registered_parts = {}
    self.level = logging.INFO
    # whether the current level was forced (by the API or by CLI args, for example)
    self.is_force_level = False
    self.linkify_methods()

    # We will keep the last 20 errors
    self.last_errors_stack_size = 20
    self.last_errors_stack = {
        'DEBUG': [],
        'WARNING': [],
        'INFO': [],
        'ERROR': []
    }

    self.last_date_print_time = 0
    self.last_date_print_value = ''

    # Shared epoch of the last time we rotated, rounded to 86400
    self.last_rotation_day = Value(c_int, 0)

    # The log will be protected by a lock (for rotating and such things).
    # WARNING: the lock is linked to a pid; if used in a subprocess it can fail
    # because the master process may have acquire()d it and so will never
    # release it in your new process.
    self.log_lock = None
    self.current_lock_pid = os.getpid()
def __init__(self, spidr_device, data_queue, pipeline_class=PixelPipeline):
    self._device = spidr_device
    Logger.__init__(self, "Timepix " + self.devIdToString())
    self._data_queue = data_queue
    self._udp_address = (self._device.ipAddrDest, self._device.serverPort)
    self.info("UDP Address is {}:{}".format(*self._udp_address))
    self._pixel_offset_coords = (0, 0)
    self._device.reset()
    self._device.reinitDevice()
    self._longtime = Value("L", 0)
    self.setupAcquisition(pipeline_class)
    self._initDACS()
    self._event_callback = None
    self._run_timer = True
    self._pause_timer = False
    self.setEthernetFilter(0xFFFF)

    # Start the timer thread
    self._timer_thread = threading.Thread(target=self.update_timer)
    self._timer_thread.daemon = True
    self._timer_thread.start()
    self.pauseHeartbeat()
    self._acq_running = False
def __init__(self, usermodel, Nlive=1024, maxmcmc=4096, output=None, verbose=1,
             seed=1, prior_sampling=False, stopping=0.1):
    """
    Initialise all necessary arguments and variables for the algorithm
    """
    self.model = usermodel
    self.prior_sampling = prior_sampling
    self.setup_random_seed(seed)
    self.verbose = verbose
    self.accepted = 0
    self.rejected = 1
    self.queue_counter = 0
    self.Nlive = Nlive
    self.Nmcmc = maxmcmc
    self.maxmcmc = maxmcmc
    self.params = [None] * self.Nlive
    self.tolerance = stopping
    self.condition = np.inf
    self.worst = 0
    self.logLmax = -np.inf
    self.iteration = 0
    self.nested_samples = []
    self.logZ = None
    self.state = _NSintegralState(self.Nlive)
    sys.stdout.flush()
    self.output_folder = output
    self.output, self.evidence_out, self.checkpoint = self.setup_output(output)
    header = open(os.path.join(output, 'header.txt'), 'w')
    header.write('\t'.join(self.model.names))
    header.write('\tlogL\n')
    header.close()
    self.logLmin = Value(c_double, -np.inf, lock=Lock())
def resizing_data(input_data, width, height):
    resized_data = []

    def read_imagecv2(img, counter):
        img = cv2.resize(img, (width, height))
        resized_data.append(img)
        # Processing pools give no way to check up on progress, so we make our own counter.
        with counter.get_lock():
            counter.value += 1

    # Start worker processes; this should match your processor cores (or fewer).
    with Pool(processes=2) as pool:
        # Using sharedctypes with mp.dummy isn't needed anymore, but we already wrote the code once...
        counter = Value(c_int, 0)
        # Making this larger might improve speed (less important the longer a single call takes).
        chunksize = 4
        resized_test_data = pool.starmap_async(
            read_imagecv2, ((img, counter) for img in input_data),
            chunksize)  # how many jobs to submit to each worker at once
        while not resized_test_data.ready():
            # Print out progress to indicate the program is still working.
            # You could lock here, but you're not modifying the value, so nothing
            # bad will happen if a write occurs simultaneously; just don't
            # time.sleep() while you're holding the lock.
            print("\rcompleted {} images ".format(counter.value), end='')
            time.sleep(.5)
    print('\nCompleted all images')
    return resized_data
def run_command(self, command, *args, **opts):
    if len(self.members) <= 0:
        raise TomcatError("Cluster has no members")
    hosts = opts.setdefault('hosts', self.members.keys())
    threads = opts.setdefault('threads',
                              min(self.member_count(), self.max_threads))
    abort_on_error = opts.setdefault('abort_on_error', False)
    if abort_on_error:
        abort = Value('b', 0)

    def run_cmd(host):
        try:
            if abort_on_error and abort.value:
                raise TomcatError('Aborted')
            self.log.debug("Performing %s%s on %s", command, args, host)
            self._run_progress_callback(event=events.CMD_START,
                                        command=command, args=args, node=host)
            rv = getattr(self.members[host], command)(*args)
            self._run_progress_callback(event=events.CMD_END,
                                        command=command, args=args, node=host)
        except Exception as e:
            if abort_on_error:
                abort.value = True
            rv = e
        return (host, rv)

    pool = ThreadPool(processes=threads)
    return ClusterCommandResults(pool.map(run_cmd, hosts))
def runCmd(self, exe, bypass, tail=Array('c', ' ' * 10000), code=Value('i', 0)):
    if bypass:
        proc = sp.Popen(exe, bufsize=1024, stdout=sp.PIPE, stderr=sp.PIPE,
                        shell=True)
        t1 = threading.Thread(target=self.bufferScreen, args=(proc.stdout, ))
        t1.start()
        t1.join()
        proc.wait()
        code.value = proc.returncode
        if code.value != 0 and tail.value.strip() == '':
            tail.value = 'I was only able to capture the following execution error while executing the following:\n' + exe + \
                '\n... you may wish to re-run without bypass option.' + \
                '\n' + '~' * 18 + '\n' + str(proc.stderr.read().strip()) + '\n' + '~' * 18
            self.tail = self.tail + '\n' + tail.value
    else:
        code.value = sp.call(exe, shell=True)
        if code.value != 0:
            tail.value = '... The following command failed for the reason above (or below)\n' + exe + '\n'
            self.tail = self.tail + '\n' + tail.value
    return self.tail, code.value
def __init__(self, ctrl_url, task_server_url, num_workers='-1'):
    self.ctrl_url = ctrl_url
    self.task_server_url = task_server_url
    self.num_workers = (multiprocessing.cpu_count()
                        if num_workers == '-1' else int(num_workers))
    self.executing = Value('i', 0)
    self.pid = os.getpid()
def __init__(self, worker, writer, threads=4):
    self.worker = worker
    self.writer = writer
    self.q = JoinableQueue()
    self.done = Value(c_bool, False)
    self.consumer = Process(target=self.consume)
    self.pool = Pool(threads, init_opener)
def run_sequentially(vector):
    """Execute process of finding vector's max norm sequentially."""
    max_norm = Value('i', 0)
    process = Process(target=find_max_norm, args=(max_norm, vector))
    process.start()
    process.join()
    return max_norm.value
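# The worker target is not shown above. A minimal, hypothetical sketch of
# find_max_norm, assuming "max norm" means the largest absolute component and that
# the result fits in the 'i' (int) shared Value created by run_sequentially:
def find_max_norm(max_norm, vector):
    # Write the result into the shared Value so the parent can read it after join().
    max_norm.value = max(abs(x) for x in vector)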
class Worker:
    status = Value(c_int)

    def __init__(self):
        super().__init__()
        self.tq = Queue()
        self.dq = Queue()
        self.st = Value(c_int)

    def proc(self):
        input = self.tq
        output = self.dq
        val = self.st
        for func, args in iter(input.get, 'STOP'):
            result, val.value = Worker.calculate(func, args)
            output.put(result)

    @classmethod
    def calculate(cls, func, args):
        result = func(*args)
        cls.status.value = result
        return 'i:%d %s says that %s%s = %s' % \
            (cls.status.value, current_process().name,
             func.__name__, args, result), result
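# A hedged usage sketch for the Worker above (mul is illustrative only): push
# (func, args) tuples onto tq, read formatted results from dq, and send 'STOP'
# to end the loop. Under the spawn start method the class-level Value is
# re-created in the child, so it is only meaningful inside that process.
if __name__ == '__main__':
    from multiprocessing import Process
    from operator import mul

    w = Worker()
    p = Process(target=w.proc)
    p.start()
    w.tq.put((mul, (6, 7)))
    print(w.dq.get())   # e.g. "i:42 Process-1 says that mul(6, 7) = 42"
    w.tq.put('STOP')
    p.join()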
def main():
    # global udpProcess  # try to kill udpProcess using startTkinter
    lock = Lock()
    n = Array('i', [0] * 10, lock=lock)  # packet storage array for transfer between processes
    Log_names = startLogging = Value('i', 0, lock=lock)
    stopLogging = Value('i', 1, lock=lock)
    print 'Start Bool: ' + str(startLogging.value) + '\n'
    print 'Stop Bool: ' + str(stopLogging.value) + '\n'
    udpProcess = Process(name='UDP Process', target=UDP,
                         args=(n, startLogging, stopLogging))
    TkinterProcess = Process(name='Tkinter Process', target=startTkinter,
                             args=(n, startLogging, stopLogging))
    # broadcastProcess = Process(name='Broadcasting Process', target=broadcast)
    udpProcess.start()
    TkinterProcess.start()
    udpProcess.join()
    TkinterProcess.join()
    print 'End Packets: ' + str(n[:])  # the final packets after both processes have ended
def __init__(self, mabx_ready, vehicle_ready, test_type):
    self.mabx_ready = mabx_ready
    self.vehicle_ready = vehicle_ready
    self.test_type = test_type
    self.odometer = Value(c_uint, 45987)
    self.mode = Value(c_int, 2)
    self.mabx_log_playing = False
    self.vehicle_log_playing = False
    if self.mabx_ready:
        self.mabx_process = MABXCanSender(self.mode, self.test_type, False)
        self.mabx_process.start()
    if self.vehicle_ready:
        self.vehicle_process = VehicleCanSender(self.odometer, self.test_type,
                                                False)
        self.vehicle_process.start()
def __init__(self):
    # Server socket initialisation
    self.TEMPS_MAX = 30
    self.TAILLE_BLOC = 4096
    self.sock = socket(AF_INET, SOCK_STREAM)
    self.sock.setsockopt(SOL_SOCKET, SO_BROADCAST, 1)
    self.sock.bind(("", 8000))
    self.sock.listen(26)

    # Attributes
    self.partie_en_cours = Value('i', 0)
    self.manager = Manager()
    self.connexions = self.manager.dict()
    self.data = self.manager.Queue()

    print("Lancement du serveur de Quizz")
    self.run()
def add_export(self, export_range, export_dir):
    if system() == 'Darwin':
        set_start_method('spawn')
    logger.debug("Adding new video export process.")
    should_terminate = Value(c_bool, False)
    frames_to_export = Value(c_int, 0)
    current_frame = Value(c_int, 0)

    rec_dir = self.g_pool.rec_dir
    user_dir = self.g_pool.user_dir
    start_frame = export_range.start
    end_frame = export_range.stop + 1  # end_frame is exclusive
    frames_to_export.value = end_frame - start_frame

    # Here we make clones of every plugin that supports it,
    # so it runs with the current config when we launch the exporter.
    plugins = self.g_pool.plugins.get_initializers()

    out_file_path = verify_out_file_path(self.rec_name, export_dir)
    process = Export_Process(
        target=export,
        args=(should_terminate, frames_to_export, current_frame, rec_dir,
              user_dir, self.g_pool.min_data_confidence, start_frame,
              end_frame, plugins, out_file_path))
    self.new_export = process
from WordList import *
from django.utils import simplejson
from multiprocessing.sharedctypes import Value

# wl = WordList()
# ret = wl.createIndex()
# print('Return:%d' % ret)
# retWord = wl.search('WTF')
# print('Return:%s' % retWord)

wl = Value(WordList)
wl.createIndex()
rtnMsg = wl.searchPy(15.123, 100.456, "i am the key words")
print rtnMsg
jsonMsg = simplejson.dumps(rtnMsg, ensure_ascii=False)
print jsonMsg
for i in range(len(all_SF_data[0])):
    if first == 1:
        first = 0
        continue
    temp_cancer.append(float(all_SF_data[0][i][1]))
    temp_non_cancer.append(float(all_SF_data[0][i][2]))

SF_cancer_data = temp_cancer
SF_non_cancer_data = temp_non_cancer

breathing_rate = float(1.6)

# We create the queues for data processing.
tasks_to_be_done = mp.Queue()     # we will fill this one in the main process
tasks_to_be_written = mp.Queue()  # we will fill this one in the processing children
task_for_cumul = mp.Queue()       # we will fill this one in the writer child

kill_switch_processing = Value(ctypes.c_int, 0)
kill_switch_writing = Value(ctypes.c_int, 0)
kill_switch_cumul = Value(ctypes.c_int, 0)

nb_cpu = mp.cpu_count()
nb_iter = len(all_SF_data[0]) - 2

# We start the processes so that they will start working while the queue is filled.
time_run = time.clock()
print "starting processes"
writer = mp.Process(target=writing, name="writer_main",
                    args=(tasks_to_be_written, kill_switch_writing, time_stamp))
writer.start()
cumulation = mp.Process(target=cumul, name="cumul_main",
                        args=(time_stamp, nb_iter, IO_sectors_list,
                              task_for_cumul, kill_switch_cumul))
cumulation.start()
print nb_cpu