def ping(grid, curr_pos, goals):
    """Run A* toward every goal in parallel and return the best next move.

    Each worker writes (next_x, next_y, cost) into its own row of a shared
    (len(goals), 3) int array; a non-positive cost marks an unreachable goal.

    Returns the direction (via get_dir) toward the first step of the
    cheapest reachable goal, or None when no goal is reachable.
    """
    shared_array_base = _Array(ctypes.c_int, len(goals) * 3)
    result = _np.ctypeslib.as_array(shared_array_base.get_obj())
    result = result.reshape(len(goals), 3)
    processes = [
        _Process(target=a_star_search, args=(result[i], grid, curr_pos, goal))
        for i, goal in enumerate(goals)
    ]
    for p in processes:
        p.start()
    for p in processes:
        p.join()
    cost = MAX_COST
    index = -1
    for i, x in enumerate(result):
        # Skip unreachable goals (cost <= 0) and non-improving costs.
        if 0 < x[2] < cost:
            cost = x[2]
            index = i
    # BUG FIX: previously, when no goal was reachable, index stayed -1 and
    # result[-1] was read, producing a move from the last goal's stale row.
    if index == -1:
        return None
    next_move = (result[index][0], result[index][1])
    move = get_dir(curr_pos, next_move)
    return move
def ping(grid, curr_pos, goals):
    """Parallel A* toward each goal; return the move toward the cheapest one.

    Workers fill rows of a shared (len(goals), 3) int array with
    (next_x, next_y, cost); a non-positive cost means unreachable.
    Returns None when every goal is unreachable.
    """
    shared = _Array(ctypes.c_int, len(goals) * 3)
    result = _np.ctypeslib.as_array(shared.get_obj()).reshape(len(goals), 3)
    workers = [
        _Process(target=a_star_search, args=(result[i], grid, curr_pos, goal))
        for i, goal in enumerate(goals)
    ]
    for w in workers:
        w.start()
    for w in workers:
        w.join()
    best_cost = MAX_COST
    best = -1
    for i, row in enumerate(result):
        if 0 < row[2] < best_cost:
            best_cost = row[2]
            best = i
    # BUG FIX: best == -1 (nothing reachable) formerly indexed result[-1],
    # returning a move derived from the last goal's stale row.
    if best == -1:
        return None
    return get_dir(curr_pos, (result[best][0], result[best][1]))
def __init__(self, auth_endpoint=None, token_endpoint=None, client_id=None, redirect_uri=None):
    """Set up an OAuth2 authorization-code client using PKCE.

    Previously stored tokens are reused from the system keyring when
    present; otherwise the full interactive flow runs: a one-shot local
    callback server is spawned in a subprocess, the authorization code is
    requested, and the code received on the callback is exchanged for an
    access token.
    """
    self._auth_endpoint = auth_endpoint
    self._token_endpoint = token_endpoint
    self._client_id = client_id
    self._redirect_uri = redirect_uri
    # PKCE: the verifier stays local; only its S256 challenge is sent out.
    self._code_verifier = _generate_code_verifier()
    code_challenge = _create_code_challenge(self._code_verifier)
    self._code_challenge = code_challenge
    # Random state parameter guards the callback against CSRF.
    state = _generate_state_parameter()
    self._state = state
    self._credentials = None
    self._refresh_token = None
    self._headers = {"content-type": "application/x-www-form-urlencoded"}
    self._expired = False
    self._params = {
        "client_id": client_id,  # This must match the Client ID of the OAuth application.
        "response_type": "code",  # Indicates the authorization code grant
        "scope": "openid offline_access",  # ensures that the /token endpoint returns an ID and refresh token
        # callback location where the user-agent will be directed to.
        "redirect_uri": self._redirect_uri,
        "state": state,
        "code_challenge": code_challenge,
        "code_challenge_method": "S256",
    }
    # Prefer to use already-fetched token values when they've been set globally.
    self._refresh_token = _keyring.get_password(
        _keyring_service_name, _keyring_refresh_token_storage_key)
    access_token = _keyring.get_password(
        _keyring_service_name, _keyring_access_token_storage_key)
    if access_token:
        # Cached token found: skip the interactive flow entirely.
        # NOTE(review): the cached token is not validated here; expiry is
        # presumably handled elsewhere via self._expired — confirm.
        self._credentials = Credentials(access_token=access_token)
        return
    # In the absence of globally-set token values, initiate the token request flow
    q = _Queue()
    # First prepare the callback server in the background
    server = self._create_callback_server(q)
    server_process = _Process(target=server.handle_request)
    server_process.start()
    # Send the call to request the authorization code
    self._request_authorization_code()
    # Request the access token once the auth code has been received.
    auth_code = q.get()
    server_process.terminate()
    self.request_access_token(auth_code)
def __thread(self):
    """Monitor thread function.

    Names the current thread, signals that the monitor is prepared,
    creates the worker fork, waits for the start event, then starts the
    worker and blocks in the monitoring loop until it finishes.
    """
    _current_thread().name = "Monitor%d" % (self.__id)
    # Let the launcher know this monitor thread is up and running.
    self.__prepared.set()
    self.info("Creating the fork")
    self.__worker = _Process(target=self.__procedure)
    # Poll the start event with a timeout so progress can be logged.
    while not self.__events.waitStart(self.__checkPeriod):
        self.debug("Waiting to start")
        # FIXME: msg to be removed, together with the timeout
    self.info("Starting the fork")
    self.__worker.start()
    self.__processMonitoring()
    self.info("Monitor has finished its task")
def start(self):
    """Launch the component in a daemonized child process.

    When a manager is attached, a Bridge over one end of a Pipe is
    started first and the other end of the pipe is handed to the child.
    """
    if self._manager is None:
        main_args = (self._running,)
    else:
        parent_sock, child_sock = Pipe()
        self._bridge = Bridge(findroot(self._manager), socket=parent_sock)
        self._bridge.start()
        main_args = (self._running, child_sock)
    proc = _Process(target=self.__main__, args=main_args)
    proc.daemon = True
    # HAS_MULTIPROCESSING == 2 presumably marks an older API expecting the
    # camelCase name; alias it onto the snake_case method.
    if HAS_MULTIPROCESSING == 2:
        setattr(proc, "isAlive", proc.is_alive)
    self._process = proc
    # Flip the shared running flag under its lock before launching.
    self._running.acquire()
    self._running.value = True
    self._running.release()
    self._process.start()
def processes_start(self): """.""" # Create shared memory objects to be shared with worker processes. arr = self._sofb_current_readback_ref rbref = _shm.Array(_shm.ctypes.c_double, arr.size, lock=False) self._sofb_current_readback_ref = _np.ndarray( arr.shape, dtype=arr.dtype, buffer=memoryview(rbref)) ref = _shm.Array(_shm.ctypes.c_double, arr.size, lock=False) self._sofb_current_refmon = _np.ndarray( arr.shape, dtype=arr.dtype, buffer=memoryview(ref)) fret = _shm.Array(_shm.ctypes.c_int, arr.size, lock=False) self._sofb_func_return = _np.ndarray( arr.shape, dtype=_np.int32, buffer=memoryview(fret)) # Unit converter. self.converter = UnitConverter(self._sofb_psnames) # subdivide the pv list for the processes nr_bbbs = len(PSSOFB.BBBNAMES) div = nr_bbbs // self._nr_procs rem = nr_bbbs % self._nr_procs sub = [div*i + min(i, rem) for i in range(self._nr_procs+1)] for i in range(self._nr_procs): bbbnames = PSSOFB.BBBNAMES[sub[i]:sub[i+1]] evt = _Event() evt.set() theirs, mine = _Pipe(duplex=False) proc = _Process( target=PSSOFB._run_process, args=(self._ethbridge_cls, bbbnames, theirs, evt, arr.shape, rbref, ref, fret), daemon=True) proc.start() self._procs.append(proc) self._doneevts.append(evt) self._pipes.append(mine)
def start(self):
    """Spawn the daemonized worker process, wiring a Bridge when managed."""
    args = (self._running,)
    if self._manager is not None:
        # Managed mode: bridge the root component over a pipe and append
        # the child end of the pipe to the worker's arguments.
        parent, child = Pipe()
        root = findroot(self._manager)
        self._bridge = Bridge(root, socket=parent)
        self._bridge.start()
        args += (child,)
    self._process = _Process(target=self.__main__, args=args)
    self._process.daemon = True
    if HAS_MULTIPROCESSING == 2:
        # Expose the camelCase alias expected by older callers.
        setattr(self._process, "isAlive", self._process.is_alive)
    # Mark the shared running flag True under its lock, then go.
    self._running.acquire()
    self._running.value = True
    self._running.release()
    self._process.start()
def ping(grid, current, goals, last_dir):
    """Parallel A* toward each goal; return the move toward the cheapest.

    Each worker writes (next_x, next_y, cost) into its own row of a shared
    (len(goals), 3) int array; a non-positive cost marks an unreachable
    goal. Falls back to last_dir when no goal is reachable.
    """
    current = tuple(current)
    goals = [tuple(x) for x in goals]
    shared_array_base = _Array(ctypes.c_int, len(goals) * 3)
    result = _np.ctypeslib.as_array(shared_array_base.get_obj())
    result = result.reshape(len(goals), 3)
    processes = [
        _Process(target=a_star_search, args=(result[i], grid, current, goal))
        for i, goal in enumerate(goals)
    ]
    for p in processes:
        p.start()
    for p in processes:
        p.join()
    # FIX: removed leftover Python-2 debug print and dead commented-out
    # code; use the module's MAX_COST sentinel, consistent with the
    # sibling ping() implementation, instead of a hard-coded 10**21.
    cost = MAX_COST
    index = -1
    for i, x in enumerate(result):
        if 0 < x[2] < cost:
            cost = x[2]
            index = i
    if index == -1:
        return last_dir
    next_move = (result[index][0], result[index][1])
    move = get_dir(current, next_move)
    return move
def _generate_parallel(n_process, n_iter, gen_func, args_list):
    """
    Generator which spawns processes to run generators, then uses a queue
    for each process to retrieve the results which it then yields.

    n_process -- number of worker processes to spawn (<= 1 runs serially)
    n_iter    -- number of result tuples to yield
    gen_func  -- generator factory; called as gen_func(*arg) per arg
    args_list -- one argument tuple per generator to create
    """
    n_items = len(args_list)
    # calculate how to distribute generators over processes.
    if n_items <= n_process and n_process > 0:
        # Fewer items than processes: one generator per process.
        n_process = n_items
        n_pp = 1
        n_left = 1
    elif n_items > n_process and n_process > 0:
        # First process takes n_pp plus the remainder; the rest take n_pp.
        n_pp = n_items // n_process
        n_left = n_pp + n_items % n_process
    # if one process specified just do the generator without sub processes.
    if n_process <= 1:
        gens = []
        for arg in args_list:
            gens.append(gen_func(*arg))
        generator = _izip(*gens)
        for s in generator:
            yield s
        return
    # split up argument list
    sub_lists = [args_list[0:n_left]]
    sub_lists.extend([args_list[n_left + i * n_pp:n_left + (i + 1) * n_pp]
                      for i in range(n_process - 1)])
    # create lists of queues, events, and processes.
    es = []
    qs = []
    ps = []
    for i in range(n_process):
        e = _Event()
        # Queue of size 1: each worker blocks until its previous batch
        # has been consumed here.
        q = _Queue(1)
        p = _Process(target=_worker, args=(gen_func, sub_lists[i], q, e))
        p.daemon = True
        es.append(e)
        qs.append(q)
        ps.append(p)
    # start processes
    for p in ps:
        p.start()
    # for number of iterations
    for i in range(n_iter):
        s = []
        # retrieve results for each sub-process and let the process know to continue calculation.
        for q, e in _izip(qs, es):
            s.extend(q.get())
            e.set()  # free process to do next calculation
        # yield all results
        yield tuple(s)
    # end processes
    for p in ps:
        p.join()
def __init__(self, *args, **kwargs):
    """Build the process wrapper: shared running flag, worker, control pipe."""
    super(Process, self).__init__(*args, **kwargs)
    # Shared boolean ('b' = signed char) telling the child whether to run.
    self.running = _Value("b", False)
    # The child executes self._run, driving self.run under the flag.
    self.process = _Process(target=self._run, args=(self.run, self.running))
    self.parent, self.child = _Pipe()
def __init__(self, *args, **kwargs):
    """Initialize the wrapper, creating the worker process and its pipe."""
    super(Process, self).__init__(*args, **kwargs)
    self._running = _Value("b", False)  # shared run/stop flag for the child
    worker_args = (self.run, self._running)
    self.process = _Process(target=self._run, args=worker_args)
    self.parent, self.child = _Pipe()