def sort_count_dp_multi(S_val, y_train, K, MM=None, n_jobs=4):
    indices = range(len(S_val))
    if MM is None:
        MM = [None] * len(S_val)
    if n_jobs == 1:
        counters = [
            sort_count_dp(S_val[i], y_train, K, MM[i]) for i in indices
        ]
    else:
        # use the pool as a context manager so worker processes are reclaimed
        with Pool(n_jobs) as pool:
            counters = pool.map(
                partial(sort_count_dp_wrapper,
                        S_val=S_val, y_train=y_train, K=K, MM=MM),
                indices)
    return counters
def sort_count_after_clean_multi(S_val, y_train, K, n_jobs=4, MM=None):
    indices = range(len(S_val))
    if MM is None:
        MM = [None] * len(S_val)
    if n_jobs == 1:
        after_entropies = [
            sort_count_after_clean(S_val[i], y_train, K, MM[i])
            for i in indices
        ]
    else:
        # same pattern as sort_count_dp_multi; close the pool when done
        with Pool(n_jobs) as pool:
            after_entropies = pool.map(
                partial(sort_count_after_clean_wrapper,
                        S_val=S_val, y_train=y_train, K=K, MM=MM),
                indices)
    return after_entropies
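# A minimal sketch of the wrapper functions referenced above, which are not
# shown in these snippets. multiprocessing.Pool.map requires a picklable,
# module-level callable, so each wrapper plausibly just maps an integer index
# back into the shared arguments bound via partial(). The exact bodies here
# are assumptions, not the original implementations.
def sort_count_dp_wrapper(i, S_val, y_train, K, MM):
    return sort_count_dp(S_val[i], y_train, K, MM[i])


def sort_count_after_clean_wrapper(i, S_val, y_train, K, MM):
    return sort_count_after_clean(S_val[i], y_train, K, MM[i])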
def main(schema):
    tables = [
        Table(
            schema=schema,
            name=table,
            pk_s3=TABLES[table]['pk_s3'],
            pk_db=TABLES[table]['pk_db'],
            pk_data_type=TABLES[table]['pk_data_type'],
            files=TABLES[table]['files'],
            columns=TABLES[table]['columns'],
        )
        for table in TABLES.keys()
    ]
    if not DEBUG:
        # only spin up worker processes when actually fanning out
        with Pool(multiprocessing.cpu_count()) as pool:
            pool.map(process_tables, tables)
    else:
        # in debug mode, process tables serially for easier tracebacks
        for table in tables:
            table.process()
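# Hedged sketch: `process_tables` is referenced above but not defined in this
# snippet. Since Pool.map pickles its callable, it must be a module-level
# function rather than a bound method, so a thin shim like this is the usual
# shape; the body is an assumption based on the DEBUG branch calling
# table.process() directly.
def process_tables(table):
    table.process()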
def get_transformations(self, from_frame, to_tags, n=NUM_REDUNDANT_DETECTIONS):
    # define task
    def f(mapper, from_frame, to_tag, n):
        tfs = []
        to_frame = 'Tag%s' % str(to_tag)
        listener = tf.TransformListener()
        listener.waitForTransform(from_frame, to_frame,
                                  rospy.Time(), rospy.Duration(4.0))
        while (not mapper.is_shutdown) and (len(tfs) < n):
            try:
                t = listener.getLatestCommonTime(from_frame, to_frame)
                trans, rot = listener.lookupTransform(from_frame, to_frame, t)
                if self.is_valid_transformation(trans, rot):
                    tfs.append(TF(trans=np.array(trans), rot=np.array(rot)))
            except (tf.LookupException, tf.ConnectivityException,
                    tf.ExtrapolationException) as e:
                mapper.log(e, 'warn')
            time.sleep(0.1)
        return from_frame, to_tag, tfs

    # create a pool of threads
    p = Pool(NUM_THREADS)
    for to_tag in to_tags:
        self.log('Looking up transformations [%s] -> [%s]' % (from_frame, to_tag))
        p.enqueue(f, self, from_frame, to_tag, n)
    # spin workers
    p.run()
    # wait for results
    tfs = dict()
    for f0, f, ts in p.iterate_results():
        self.log('Looked up %d transformations [%s] -> [%s]' % (len(ts), f0, f))
        tfs[f] = extract_transformations(
            ts,
            self.parameters['~snap_position'],
            self.parameters['~snap_orientation'],
            self.parameters['~orientation_resolution_deg']
        )
    # ---
    return tfs
def get_transformations(self, from_frames, to_frame, n=NUM_REDUNDANT_DETECTIONS):
    # define task
    def f(mapper, from_frame, to_frame, n):
        tfs = []
        listener = tf.TransformListener()
        while (not mapper.is_shutdown) and (len(tfs) < n):
            try:
                t = listener.getLatestCommonTime(from_frame, to_frame)
                trans, rot = listener.lookupTransform(from_frame, to_frame, t)
                tfs.append(TF(trans=np.array(trans), rot=np.array(rot)))
            except (tf.LookupException, tf.ConnectivityException,
                    tf.ExtrapolationException):
                continue
            time.sleep(0.2)
        return from_frame, to_frame, tfs

    # create a pool of threads
    p = Pool(NUM_THREADS)
    for from_frame in from_frames:
        self.log('Looking up transformations [%s] -> [%s]' % (from_frame, to_frame))
        p.enqueue(f, self, from_frame, to_frame, n)
    # spin workers
    p.run()
    # wait for results
    tfs = dict()
    for f, f0, ts in p.iterate_results():
        self.log('Looked up %d transformations [%s] -> [%s]' % (len(ts), f, f0))
        tfs[f] = extract_transformations(
            ts,
            self.parameters['~snap_position'],
            self.parameters['~snap_orientation'],
            self.parameters['~orientation_resolution_deg']
        )
    # ---
    return tfs
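# The Pool used by the two get_transformations variants above is not
# multiprocessing.Pool: it exposes an enqueue()/run()/iterate_results()
# interface, and its implementation is not shown in these snippets. Below is
# a minimal standard-library sketch of that interface; the class name and
# blocking behavior of run() are assumptions, not the original code.
import queue
import threading


class ThreadPoolSketch:
    def __init__(self, num_workers):
        self._num_workers = num_workers
        self._tasks = queue.Queue()
        self._results = queue.Queue()

    def enqueue(self, fn, *args):
        # store each task as (callable, arguments)
        self._tasks.put((fn, args))

    def _worker(self):
        # drain the task queue, collecting one result per task
        while True:
            try:
                fn, args = self._tasks.get_nowait()
            except queue.Empty:
                return
            self._results.put(fn(*args))

    def run(self):
        # spin num_workers threads and block until every task has finished
        threads = [threading.Thread(target=self._worker)
                   for _ in range(self._num_workers)]
        for t in threads:
            t.start()
        for t in threads:
            t.join()

    def iterate_results(self):
        # yield results in completion order
        while not self._results.empty():
            yield self._results.get()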
# NOTE: the function header was missing from this fragment; it is
# reconstructed from the call site below, client_pool(writer, args.e,
# args.c, [args.d]).
def client_pool(func, entries_count, workers, additional_args):
    pool = Pool(workers)
    start_time = timer()
    # every worker gets an identical argument list
    worker_args = [[entries_count // workers] + additional_args]
    finish_times = pool.starmap(func, worker_args * workers)
    return (statistics.stdev(finish_times),
            statistics.mean(finish_times) - start_time)


if __name__ == '__main__':
    args = parser.parse_args()

    from utils import Pool as ServerPool
    from zatt.client.distributedDict import DistributedDict
    from utils import get_random_string

    print('Starting {} server instances'.format(args.s))
    server_pool = ServerPool(args.s)
    server_pool.start(server_pool.ids)
    sleep(1)
    print('Setup completed')
    print('-' * 80)
    print('Started pushing data')

    stdev_time, elapsed_time = client_pool(writer, args.e, args.c, [args.d])

    print('Finished pushing {} key/value pairs of size {} kB '
          'with {} clients'.format(args.e, args.d / 1000, args.c))
    print('Duration: {:.2f} sec, stdev: {:.2}'.format(elapsed_time, stdev_time))
    print('req/client: {}'.format(args.e // args.c))
    print('Cumulative req/s: {:.2f}'.format(args.e / elapsed_time))
    print('Average time/req (ms): {:.2f}'.format(elapsed_time / args.e * 1000))
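# Hedged sketch of the benchmark worker passed to client_pool above; the real
# `writer` is defined elsewhere in this script. From the statistics computed
# over `finish_times` it must return its own finish timestamp, and from the
# report above it pushes `entries` values of `size` bytes each. The host,
# port, and key length here are assumptions.
def writer(entries, size):
    d = DistributedDict('127.0.0.1', 9110)  # assumed host/port
    for _ in range(entries):
        d[get_random_string(10)] = get_random_string(size)
    return timer()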
class BasicTest(unittest.TestCase):

    def setUp(self):
        self.maxDiff = None
        key = RSA.generate(2048)
        self.privateKey = PKCS1_PSS.new(key)
        publicKey = key.publickey()
        nodeKeys = [RSA.generate(2048) for i in range(4)]  # these are the node keys
        print('BasicTest setup')
        self.pool = Pool(4, nodeKeys, publicKey)
        self.pool.start(self.pool.ids)
        clusterAddresses = [("127.0.0.1", 9110 + i) for i in range(4)]  # [(ip_addr, port)]
        self.default_cluster = set(clusterAddresses)
        # the client needs to know the mapping to public keys
        self.clusterMap = {
            k: PKCS1_PSS.new(nodeKeys[i].publickey())
            for i, k in enumerate(clusterAddresses)
        }  # [(ip_addr, port) -> public key]
        sleep(5)  # sleep to wait for servers to set up

    def tearDown(self):
        self.pool.stop(self.pool.ids)
        self.pool.rm(self.pool.ids)

    def test_1_append(self):
        print('Append test')
        d = DistributedDict('127.0.0.1', 9110, self.clusterMap, self.privateKey)
        d['adams'] = 'the hitchhiker guide'
        del d
        sleep(1)
        d = DistributedDict('127.0.0.1', 9110, self.clusterMap, self.privateKey)
        self.assertEqual(d['adams'], 'the hitchhiker guide')

    def test_2_delete(self):
        print('Delete test')
        d = DistributedDict('127.0.0.1', 9110, self.clusterMap, self.privateKey)
        d['adams'] = 'the hitchhiker guide'
        sleep(1)
        del d['adams']
        sleep(1)
        d = DistributedDict('127.0.0.1', 9110, self.clusterMap, self.privateKey)
        self.assertEqual(d, {'cluster': self.default_cluster})

    def test_3_read_from_different_client(self):
        print('Read from different client')
        d = DistributedDict('127.0.0.1', 9110, self.clusterMap, self.privateKey)
        d['adams'] = 'the hitchhiker guide'
        del d
        sleep(1)
        d = DistributedDict('127.0.0.1', 9111, self.clusterMap, self.privateKey)
        self.assertEqual(d['adams'], 'the hitchhiker guide')
class FailureModeAppendTest(unittest.TestCase):

    def setUp(self):
        self.pool = Pool(4, config_file)
        self.pool.start(self.pool.ids)
        self.client_pool = ClientProcess(1, config_file)
        self.client_pool.start(self.client_pool.ids)
        sleep(1)

    def tearDown(self):
        self.pool.stop(self.pool.ids)
        self.pool.rm(self.pool.ids)
        self.client_pool.stop(self.client_pool.ids)

    def test_append_write_failure_simple(self):
        print('Append test - Write Failure Simple')
        d = DistributedDict('127.0.0.1', 9116)
        d['adams'] = 'the hitchhiker guide'
        # a 4-node cluster keeps a 3-node majority with any single node down,
        # so reads and writes must still succeed after each stop() below
        self.pool.stop(0)
        self.assertEqual(d['adams'], 'the hitchhiker guide')
        self.pool.start(0)
        self.pool.stop(1)
        d['0'] = '1'
        self.assertEqual(d['adams'], 'the hitchhiker guide')
        self.pool.start(1)
        self.pool.stop(2)
        d['1'] = '0'
        self.assertEqual(d['adams'], 'the hitchhiker guide')
        del d

    def test_append_write_failure_complex(self):
        print('Append test - Write Failure Complex')
        d = DistributedDict('127.0.0.1', 9116)
        d['adams'] = 'the hitchhiker guide'
        self.pool.stop(0)
        self.assertEqual(d['adams'], 'the hitchhiker guide')
        del d['adams']
        self.pool.start(0)
        self.pool.stop(1)
        d['foo'] = 'bar'
        self.assertEqual(d['adams'], None)
        self.assertEqual(d['foo'], 'bar')
        self.pool.start(1)
        self.pool.stop(2)
        d['bar'] = 'foo'
        del d['foo']
        self.assertEqual(d['adams'], None)
        self.assertEqual(d['foo'], None)
        self.assertEqual(d['bar'], 'foo')
        del d['bar']
        self.pool.start(2)
        self.pool.stop(0)
        d['1'] = '0'
        self.assertEqual(d['adams'], None)
        self.assertEqual(d['foo'], None)
        self.assertEqual(d['bar'], None)
        self.assertEqual(d['1'], '0')
        self.pool.start(0)
        del d
class BasicAppendTest(unittest.TestCase):

    def setUp(self):
        self.pool = Pool(4, config_file)
        self.pool.start(self.pool.ids)
        self.client_pool = ClientProcess(3, config_file)
        self.client_pool.start(self.client_pool.ids)
        sleep(1)

    def tearDown(self):
        self.pool.stop(self.pool.ids)
        self.pool.rm(self.pool.ids)
        self.client_pool.stop(self.client_pool.ids)

    def test_append_read_same(self):
        print('Append test - Read Same')
        d = DistributedDict('127.0.0.1', 9116)
        d['adams'] = 'the hitchhiker guide'
        self.assertEqual(d['adams'], 'the hitchhiker guide')
        del d

    def test_append_read_different(self):
        print('Append test - Read Different')
        d = DistributedDict('127.0.0.1', 9116)
        d['adams'] = 'the hitchhiker guide'
        del d
        d = DistributedDict('127.0.0.1', 9117)
        self.assertEqual(d['adams'], 'the hitchhiker guide')
        del d
        d = DistributedDict('127.0.0.1', 9118)
        self.assertEqual(d['adams'], 'the hitchhiker guide')
        del d

    def test_append_write_multiple(self):
        print('Append test - Write Multiple')
        d0 = DistributedDict('127.0.0.1', 9116)
        d1 = DistributedDict('127.0.0.1', 9117)
        d2 = DistributedDict('127.0.0.1', 9118)
        d0['0'] = '0'
        d1['1'] = '1'
        d2['2'] = '2'
        self.assertEqual(d1['0'], '0')
        self.assertEqual(d2['1'], '1')
        self.assertEqual(d0['2'], '2')
        del d0
        del d1
        del d2

    def test_delete_simple(self):
        print('Delete test - Simple')
        d = DistributedDict('127.0.0.1', 9116)
        d['adams'] = 'the hitchhiker guide'
        del d['adams']
        self.assertEqual(d['adams'], None)
        del d

    def test_delete_complex(self):
        print('Delete test - Complex')
        d = DistributedDict('127.0.0.1', 9116)
        d['0'] = '0'
        d['1'] = '1'
        d['2'] = '2'
        d['3'] = '3'
        self.assertEqual(d['0'], '0')
        self.assertEqual(d['1'], '1')
        self.assertEqual(d['2'], '2')
        self.assertEqual(d['3'], '3')
        del d['0']
        self.assertEqual(d['0'], None)
        self.assertEqual(d['1'], '1')
        self.assertEqual(d['2'], '2')
        self.assertEqual(d['3'], '3')
        del d['3']
        del d['2']
        self.assertEqual(d['0'], None)
        self.assertEqual(d['1'], '1')
        self.assertEqual(d['2'], None)
        self.assertEqual(d['3'], None)
        d['2'] = '3'
        self.assertEqual(d['0'], None)
        self.assertEqual(d['1'], '1')
        self.assertEqual(d['2'], '3')
        self.assertEqual(d['3'], None)
        del d
class BasicTest(unittest.TestCase):

    def setUp(self):
        self.maxDiff = None
        print('BasicTest setup')
        self.pool = Pool(3)
        self.pool.start(self.pool.ids)
        sleep(2)

    def tearDown(self):
        self.pool.stop(self.pool.ids)
        self.pool.rm(self.pool.ids)

    def test_0_diagnostics(self):
        print('Diagnostics test')
        print('Restarting server 0 to force Follower state')
        self.pool.stop(0)
        # sleep(2)
        self.pool.start(0)
        sleep(2)
        expected = {
            'files': 'STUB',
            'status': 'Follower',
            'persist': {'votedFor': 'STUB', 'currentTerm': 'STUB'},
            'volatile': {'leaderId': 'STUB',
                         'address': ['127.0.0.1', 9110],
                         'cluster': {('127.0.0.1', 9112),
                                     ('127.0.0.1', 9110),
                                     ('127.0.0.1', 9111)}},
            'log': {'commitIndex': -1,
                    'log': {'data': [], 'path': 'STUB'},
                    'state_machine': {'lastApplied': -1, 'data': {}},
                    'compacted': {'count': 0, 'term': None,
                                  'path': 'STUB', 'data': {}}}}
        d = DistributedDict('127.0.0.1', 9110)
        diagnostics = d.diagnostic
        diagnostics['files'] = 'STUB'
        diagnostics['log']['compacted']['path'] = 'STUB'
        diagnostics['log']['log']['path'] = 'STUB'
        diagnostics['persist']['votedFor'] = 'STUB'
        diagnostics['persist']['currentTerm'] = 'STUB'
        diagnostics['volatile']['leaderId'] = 'STUB'
        diagnostics['volatile']['cluster'] = \
            set(map(tuple, diagnostics['volatile']['cluster']))
        self.assertEqual(expected, diagnostics)

    def test_1_append(self):
        print('Append test')
        d = DistributedDict('127.0.0.1', 9110)
        d['adams'] = 'the hitchhiker guide'
        del d
        sleep(1)
        d = DistributedDict('127.0.0.1', 9110)
        self.assertEqual(d['adams'], 'the hitchhiker guide')

    def test_2_delete(self):
        print('Delete test')
        d = DistributedDict('127.0.0.1', 9110)
        d['adams'] = 'the hitchhiker guide'
        del d['adams']
        sleep(1)
        d = DistributedDict('127.0.0.1', 9110)
        self.assertEqual(d, {})

    def test_3_read_from_different_client(self):
        print('Read from different client')
        d = DistributedDict('127.0.0.1', 9110)
        d['adams'] = 'the hitchhiker guide'
        del d
        sleep(1)
        d = DistributedDict('127.0.0.1', 9111)
        self.assertEqual(d['adams'], 'the hitchhiker guide')

    def test_4_compacted_log_replication(self):
        print('Compacted log replication')
        d = DistributedDict('127.0.0.1', 9110)
        d['test'] = 0
        d['test'] = 1
        d['test'] = 2
        d['test'] = 3
        d['test'] = 4  # compaction kicks in
        del d
        sleep(1)
        d = DistributedDict('127.0.0.1', 9111)
        self.assertEqual(d, {'test': 4})

    def test_5_add_server(self):
        print('Add new server')
        d = DistributedDict('127.0.0.1', 9110)
        d['test'] = 0
        self.pool.stop(self.pool.ids)
        self.pool.start(self.pool.ids)
        self.pool.configs[10] = {
            'address': ('127.0.0.1', 9120),
            'cluster': {('127.0.0.1', 9120)},
            'storage': '20.persist',
            'debug': False
        }
        self.pool.servers[10] = Process(target=self.pool._run_server,
                                        args=(self.pool.configs[10], ))
        self.pool.start(10)
        sleep(1)
        d.config_cluster('add', '127.0.0.1', 9120)
        sleep(1)
        del d
        d = DistributedDict('127.0.0.1', 9120)
        self.assertEqual(d, {'test': 0})

    def test_6_remove_server(self):
        print('Remove server')
        d = DistributedDict('127.0.0.1', 9110)
        d.config_cluster('delete', '127.0.0.1', 9111)
        sleep(1)
        self.pool.stop(1)
        self.assertEqual(set(map(tuple, d.diagnostic['volatile']['cluster'])),
                         {('127.0.0.1', 9112), ('127.0.0.1', 9110)})
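# Standard entry point for running these unittest suites directly; a safe
# assumption for a unittest-based module like the ones above, not taken from
# the original source.
if __name__ == '__main__':
    unittest.main()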