def sort_count_dp_multi(S_val, y_train, K, MM=None, n_jobs=4):
    """Run ``sort_count_dp`` for every validation set in ``S_val``.

    Parameters
    ----------
    S_val : sequence
        One validation set per task; results keep this order.
    y_train :
        Training labels shared by every task.
    K : int
        Forwarded unchanged to ``sort_count_dp``.
    MM : sequence or None
        Optional per-task precomputed data; when ``None`` each task
        receives ``None``.
    n_jobs : int
        Number of worker processes; ``1`` runs serially in-process.

    Returns
    -------
    list
        The counter returned by ``sort_count_dp`` for each task.
    """
    indices = range(len(S_val))
    if MM is None:
        MM = [None] * len(S_val)
    if n_jobs == 1:
        return [sort_count_dp(S_val[i], y_train, K, MM[i]) for i in indices]
    # Fix: the original never closed the pool, leaking worker processes.
    # The context manager guarantees the workers are torn down.
    with Pool(n_jobs) as pool:
        return pool.map(
            partial(sort_count_dp_wrapper,
                    S_val=S_val, y_train=y_train, K=K, MM=MM),
            indices)
def sort_count_after_clean_multi(S_val, y_train, K, n_jobs=4, MM=None):
    """Run ``sort_count_after_clean`` for every validation set in ``S_val``.

    Parameters
    ----------
    S_val : sequence
        One validation set per task; results keep this order.
    y_train :
        Training labels shared by every task.
    K : int
        Forwarded unchanged to ``sort_count_after_clean``.
    n_jobs : int
        Number of worker processes; ``1`` runs serially in-process.
    MM : sequence or None
        Optional per-task precomputed data; when ``None`` each task
        receives ``None``.

    Returns
    -------
    list
        The entropy value returned by ``sort_count_after_clean`` for
        each task.
    """
    indices = range(len(S_val))
    if MM is None:
        MM = [None] * len(S_val)
    if n_jobs == 1:
        return [
            sort_count_after_clean(S_val[i], y_train, K, MM[i])
            for i in indices
        ]
    # Fix: the original never closed the pool, leaking worker processes.
    # The context manager guarantees the workers are torn down.
    with Pool(n_jobs) as pool:
        return pool.map(
            partial(sort_count_after_clean_wrapper,
                    S_val=S_val, y_train=y_train, K=K, MM=MM),
            indices)
def main(schema):
    """Build a Table object for every entry in ``TABLES`` and process them.

    In normal mode the tables are processed in parallel, one worker per
    CPU core; when ``DEBUG`` is set they are processed serially
    in-process so failures are easier to step through.

    Parameters
    ----------
    schema :
        Passed unchanged to every ``Table``.
    """
    tables = [
        Table(
            schema=schema,
            name=name,
            pk_s3=cfg['pk_s3'],
            pk_db=cfg['pk_db'],
            pk_data_type=cfg['pk_data_type'],
            files=cfg['files'],
            columns=cfg['columns'],
        )
        # iterate items() once instead of re-looking-up TABLES[table]
        # for every field
        for name, cfg in TABLES.items()
    ]
    if DEBUG:
        for table in tables:
            table.process()
    else:
        # Fix: the original created the pool unconditionally, spawning
        # worker processes even in DEBUG mode where they were never used
        # nor released. Create it only when needed.
        with Pool(multiprocessing.cpu_count()) as pool:
            pool.map(process_tables, tables)
def get_transformations(self, from_frame, to_tags, n=NUM_REDUNDANT_DETECTIONS):
    """Look up up to `n` TF transformations from `from_frame` to each tag
    frame in `to_tags`, polling the TF tree from a pool of worker threads.

    Returns a dict mapping each tag in `to_tags` to the result of
    `extract_transformations` over the collected (trans, rot) samples.
    """
    # define task: each worker polls TF for one (from_frame, tag) pair
    def f(mapper, from_frame, to_tag, n):
        tfs = []
        # tag frames are published under the name 'Tag<id>'
        to_frame = 'Tag%s' % str(to_tag)
        listener = tf.TransformListener()
        # block (up to 4s) until the transform first becomes available
        listener.waitForTransform(from_frame, to_frame, rospy.Time(), rospy.Duration(4.0))
        # keep sampling until n valid transforms are collected or the
        # node shuts down
        while (not mapper.is_shutdown) and (len(tfs) < n):
            try:
                t = listener.getLatestCommonTime(from_frame, to_frame)
                trans, rot = listener.lookupTransform(from_frame, to_frame, t)
                # discard samples that fail the validity check
                if self.is_valid_transformation(trans, rot):
                    tfs.append(TF(
                        trans=np.array(trans),
                        rot=np.array(rot)
                    ))
            except (tf.LookupException, tf.ConnectivityException, tf.ExtrapolationException) as e:
                # transform temporarily unavailable: log and keep polling
                mapper.log(e, 'warn')
            # throttle the polling rate (runs on success and on failure)
            time.sleep(0.1)
        return from_frame, to_tag, tfs
    # create a pool of threads, one queued task per target tag
    p = Pool(NUM_THREADS)
    for to_tag in to_tags:
        self.log('Looking up transformations [%s] -> [%s]' % (from_frame, to_tag))
        p.enqueue(f, self, from_frame, to_tag, n)
    # spin workers
    p.run()
    # wait for results; NOTE: the loop variable `f` shadows the task
    # function above (harmless here, the function is no longer needed)
    tfs = dict()
    for f0, f, ts in p.iterate_results():
        self.log('Looked up %d transformations [%s] -> [%s]' % (len(ts), f0, f))
        # reduce the raw samples to a single (snapped) transformation
        tfs[f] = extract_transformations(
            ts,
            self.parameters['~snap_position'],
            self.parameters['~snap_orientation'],
            self.parameters['~orientation_resolution_deg']
        )
    # ---
    return tfs
def setUp(self):
    """Spin up a four-node pool with fresh RSA keys and record the
    address-to-public-key mapping that the client needs."""
    self.maxDiff = None
    signing_key = RSA.generate(2048)
    self.privateKey = PKCS1_PSS.new(signing_key)
    pool_public_key = signing_key.publickey()
    # One fresh 2048-bit key per node.
    node_keys = [RSA.generate(2048) for _ in range(4)]
    print('BasicTest setup')
    self.pool = Pool(4, node_keys, pool_public_key)
    self.pool.start(self.pool.ids)
    # (ip_addr, port) pairs for the four local nodes.
    addresses = [("127.0.0.1", 9110 + i) for i in range(4)]
    self.default_cluster = set(addresses)
    # The client needs each address mapped to that node's public key.
    self.clusterMap = {
        addr: PKCS1_PSS.new(key.publickey())
        for addr, key in zip(addresses, node_keys)
    }
    # Give the servers time to finish setting up before the test runs.
    sleep(5)
def get_transformations(self, from_frames, to_frame, n=NUM_REDUNDANT_DETECTIONS):
    """Look up up to `n` TF transformations from each frame in
    `from_frames` to `to_frame`, polling the TF tree from a pool of
    worker threads.

    Returns a dict mapping each source frame to the result of
    `extract_transformations` over the collected (trans, rot) samples.
    """
    # define task: each worker polls TF for one (from_frame, to_frame) pair
    def f(mapper, from_frame, to_frame, n):
        tfs = []
        listener = tf.TransformListener()
        # keep sampling until n transforms are collected or the node
        # shuts down
        while (not mapper.is_shutdown) and (len(tfs) < n):
            try:
                t = listener.getLatestCommonTime(from_frame, to_frame)
                trans, rot = listener.lookupTransform(from_frame, to_frame, t)
                tfs.append(TF(
                    trans=np.array(trans),
                    rot=np.array(rot)
                ))
            except (tf.LookupException, tf.ConnectivityException, tf.ExtrapolationException):
                # Fix: the original did `continue` here, which skipped the
                # sleep below and busy-waited at full CPU while the
                # transform was unavailable. Fall through to the sleep.
                pass
            # throttle the polling rate (runs on success and on failure)
            time.sleep(0.2)
        return from_frame, to_frame, tfs
    # create a pool of threads, one queued task per source frame
    p = Pool(NUM_THREADS)
    for from_frame in from_frames:
        self.log('Looking up transformations [%s] -> [%s]' % (from_frame, to_frame))
        p.enqueue(f, self, from_frame, to_frame, n)
    # spin workers
    p.run()
    # wait for results (renamed from `f, f0` to avoid shadowing the task
    # function above)
    tfs = dict()
    for src, dst, ts in p.iterate_results():
        self.log('Looked up %d transformations [%s] -> [%s]' % (len(ts), src, dst))
        # reduce the raw samples to a single (snapped) transformation
        tfs[src] = extract_transformations(
            ts,
            self.parameters['~snap_position'],
            self.parameters['~snap_orientation'],
            self.parameters['~orientation_resolution_deg']
        )
    # ---
    return tfs
def setUp(self):
    """Launch a four-node pool plus one client process, then pause
    briefly so both can finish starting up."""
    node_pool = Pool(4, config_file)
    node_pool.start(node_pool.ids)
    self.pool = node_pool
    clients = ClientProcess(1, config_file)
    clients.start(clients.ids)
    self.client_pool = clients
    # Allow the processes a moment to come up before the test runs.
    sleep(1)
def setUp(self):
    """Bring up a three-node pool before each test."""
    self.maxDiff = None
    print('BasicTest setup')
    nodes = Pool(3)
    nodes.start(nodes.ids)
    self.pool = nodes
    # Allow the servers a moment to come up before the test runs.
    sleep(2)