    def run(self):
        """
        Run experiment
        """
        num_drivers = np.arange(1000, 6500, 500)
        # Create a pool of processes
        num_processes = mp.cpu_count()
        self.logger.info("Processes: {}".format(num_processes))
        pool = ProcessPool(nodes=num_processes)

        configs = []
        count = 0
        for drivers in num_drivers:
            self.config['RL_parameters']['experiment'] = self.expt_name + "_" + str(count)
            self.config['RL_parameters']['city_states_filename'] = "city_states.dill"
            self.config['RL_parameters']['num_drivers'] = drivers
            self.config['RL_parameters']['num_strategic_drivers'] = drivers
            configs.append(deepcopy(self.config))
            count += 1

        self.logger.info("Starting expt_02")

        results = pool.amap(self.run_rl_training, configs).get()
        pool.close()
        pool.join()
        pool.clear()

        self.logger.info("Finished expt_02")

        # Export best episode
        self.data_exporter.export_episode(results, self.expt_name + ".dill")
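
All of the experiment runners in this listing follow the same pathos recipe: mutate a base config, deepcopy each variant into a list, fan the list out with ProcessPool.amap, block on .get(), then close(), join(), and clear() the pool. A stripped-down sketch of that recipe (train() is a hypothetical stand-in for self.run_rl_training):

from copy import deepcopy
import multiprocessing as mp
from pathos.pools import ProcessPool

def train(config):
    # hypothetical stand-in for self.run_rl_training
    return config['num_drivers'] * 2

if __name__ == '__main__':
    base = {'num_drivers': 0}
    configs = []
    for drivers in range(1000, 6500, 500):
        base['num_drivers'] = drivers
        configs.append(deepcopy(base))  # deepcopy so each task keeps its own settings

    pool = ProcessPool(nodes=mp.cpu_count())
    results = pool.amap(train, configs).get()  # amap returns a handle; get() blocks
    pool.close()
    pool.join()
    pool.clear()  # reset pathos's cached pool state
    print(results)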
Code example #2
    def run(self):
        """
        Run baselines
        """
        self.logger.info("Starting baselines")
        city_states = self.data_provider.read_city_states()
        baseline_list = self.config['baselines']['baseline_list']

        # Create a pool of processes
        num_processes = mp.cpu_count()
        self.logger.info("Processes: {}".format(num_processes))
        pool = ProcessPool(nodes=num_processes)

        configs = []
        for count in range(10):  # 10 independent runs of each baseline
            for name in baseline_list:
                configs.append({
                    'name': name,
                    'count': count,
                    'config': self.config,
                    'city_states': city_states
                })

        results = pool.amap(self.run_baseline, configs).get()
        pool.close()
        pool.join()
        pool.clear()

        episode_rewards = []
        for result in results:
            episode_rewards += result

        self.data_exporter.export_baseline_data(episode_rewards)
        self.logger.info("Finished baselines")
    def run(self):
        """
        Run experiment
        """
        num_drivers = np.arange(1000, 6500, 500)
        thresholds = np.arange(5, 55, 5)
        thresholds = np.insert(thresholds, 0, 2)
        combinations = list(itertools.product(num_drivers, thresholds))

        # Create a pool of processes
        num_processes = mp.cpu_count()
        self.logger.info("Processes: {}".format(num_processes))
        pool = ProcessPool(nodes=num_processes)

        configs = []
        count = 0
        for comb in combinations:
            self.config['RL_parameters']['experiment'] = self.expt_name + "_" + str(count)
            self.config['RL_parameters']['num_drivers'] = comb[0]
            self.config['RL_parameters']['imbalance_threshold'] = comb[1]
            configs.append(deepcopy(self.config))
            count += 1

        self.logger.info("Starting expt_04")

        results = pool.amap(self.run_rl_training, configs).get()
        pool.close()
        pool.join()
        pool.clear()

        self.logger.info("Finished expt_04")

        # Export best episode
        self.data_exporter.export_episode(results, self.expt_name + ".dill")
Code example #4
    def test_09_concurrent_multiple_readers_after_big_write(self):
        # Test issue #890
        redis_graphs = []
        for i in range(0, CLIENT_COUNT):
            redis_con = self.env.getConnection()
            redis_graphs.append(Graph("G890", redis_con))
        redis_graphs[0].query(
            """UNWIND(range(0,999)) as x CREATE()-[:R]->()""")
        read_query = """MATCH (n)-[r:R]->(m) RETURN n, r, m"""

        queries = [read_query] * CLIENT_COUNT
        pool = Pool(nodes=CLIENT_COUNT)

        # invoke queries
        m = pool.amap(thread_run_query, redis_graphs, queries)

        # wait for processes to return
        m.wait()

        # get the results
        result = m.get()

        for i in range(CLIENT_COUNT):
            # a string result indicates the query failed; assertIsNone
            # fails the test and surfaces the error message
            if isinstance(result[i], str):
                self.env.assertIsNone(result[i])
            else:
                self.env.assertEquals(1000, len(result[i].result_set))
Code example #5
File: multi_process.py  Project: jchamilton75/MySoft
def parallel_for_chunk(func, args, nprocs=8):
    # split() and TIMEOUT are module-level definitions in multi_process.py
    pool = Pool(nprocs)
    slices = tuple(split(len(args), nprocs))

    def wrapper(islice):
        return func(args[slices[islice]])

    out = pool.amap(wrapper, range(len(slices))).get(TIMEOUT)
    return list(itertools.chain(*out))
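
parallel_for_chunk leans on split() and TIMEOUT defined elsewhere in multi_process.py. A self-contained sketch of the same chunked-amap pattern, with hypothetical chunk_slices() and CHUNK_TIMEOUT standing in for those names:

import itertools
from pathos.pools import ProcessPool

CHUNK_TIMEOUT = 60  # hypothetical stand-in for the module's TIMEOUT

def chunk_slices(n, nchunks):
    # partition range(n) into nchunks contiguous slices
    step, rem = divmod(n, nchunks)
    start = 0
    for i in range(nchunks):
        stop = start + step + (1 if i < rem else 0)
        yield slice(start, stop)
        start = stop

def parallel_for_chunk(func, args, nprocs=8):
    pool = ProcessPool(nodes=nprocs)
    slices = tuple(chunk_slices(len(args), nprocs))

    def wrapper(i):
        # each task applies func to one whole chunk of args
        return func(args[slices[i]])

    out = pool.amap(wrapper, range(len(slices))).get(CHUNK_TIMEOUT)
    return list(itertools.chain(*out))

if __name__ == '__main__':
    doubled = parallel_for_chunk(lambda xs: [2 * x for x in xs], list(range(10)), nprocs=2)
    assert doubled == [2 * x for x in range(10)]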
Code example #6
    def run(self):
        """
        Run experiment
        """
        num_drivers = self.config['RL_parameters']['num_drivers']
        percent_strategic_drivers = np.arange(0, 1.1, 0.1)
        num_strategic_drivers = [int(x * num_drivers) for x in percent_strategic_drivers]

        # Create a pool of processes
        num_processes = mp.cpu_count()
        pool = ProcessPool(nodes=num_processes)

        configs = []
        count = 0
        for drivers in num_strategic_drivers:
            self.config['RL_parameters']['experiment'] = self.expt_name + "_" + str(count)
            self.config['RL_parameters']['num_strategic_drivers'] = drivers
            configs.append(deepcopy(self.config))
            count += 1

        self.logger.info("Starting expt_05")

        results = pool.amap(self.run_rl_training, configs).get()
        pool.close()
        pool.join()
        pool.clear()

        self.logger.info("Finished expt_05")

        # Export best episode
        self.data_exporter.export_episode(results, self.expt_name + ".dill")
Code example #7
    def run(self):
        """
        Run experiment
        """
        days = [
            'Sunday_00_', 'Monday_00_', 'Tuesday_00_', 'Wednesday_00_',
            'Thursday_00_', 'Friday_00_', 'Saturday_00_', 'Sunday_01_',
            'Monday_01_', 'Tuesday_01_', 'Wednesday_01_', 'Thursday_01_',
            'Friday_01_', 'Saturday_01_', 'Sunday_02_', 'Monday_02_',
            'Tuesday_02_', 'Wednesday_02_', 'Thursday_02_', 'Friday_02_',
            'Saturday_02_', 'Sunday_03_', 'Monday_03_', 'Tuesday_03_',
            'Wednesday_03_', 'Thursday_03_', 'Friday_03_', 'Saturday_03_',
            'Sunday_04_', 'Monday_04_', 'Tuesday_04_', 'Wednesday_04_',
            'Thursday_04_', 'Friday_04_', 'Saturday_04_'
        ]

        num_drivers = [4000, 5000, 6000, 7000, 8000, 9000, 10000]

        imbalance_thresholds = [2]

        # Create a pool of processes
        num_processes = mp.cpu_count()
        self.logger.info("Processes: {}".format(num_processes))
        pool = ProcessPool(nodes=num_processes)

        configs = []
        count = 0

        for d in num_drivers:
            for threshold in imbalance_thresholds:
                for day in days:
                    self.config['RL_parameters']['num_drivers'] = d
                    self.config['RL_parameters']['num_strategic_drivers'] = d

                    self.config['RL_parameters']['imbalance_threshold'] = threshold
                    self.config['RL_parameters']['experiment'] = self.expt_name + "_" + str(count)
                    if os.path.isfile(self.config['app']['DATA_DIR'] +
                                      'city_states/' + day + 'city_states.dill'):
                        self.config['RL_parameters']['city_states_filename'] = day + 'city_states.dill'
                        self.config['RL_parameters']['best_model_filename'] = (
                            day + str(d) + '_' + str(threshold) + '_model.dill')
                        configs.append(deepcopy(self.config))
                        count += 1

        self.logger.info("Starting expt_07")

        results = pool.amap(self.run_rl_training, configs).get()
        pool.close()
        pool.join()
        pool.clear()

        self.logger.info("Finished expt_07")
Code example #8
    def run(self):
        """
        Run experiment
        """
        days = [
            'Sunday_00_', 'Monday_00_', 'Tuesday_00_', 'Wednesday_00_',
            'Thursday_00_', 'Friday_00_', 'Saturday_00_', 'Sunday_01_',
            'Monday_01_', 'Tuesday_01_', 'Wednesday_01_', 'Thursday_01_',
            'Friday_01_', 'Saturday_01_', 'Sunday_02_', 'Monday_02_',
            'Tuesday_02_', 'Wednesday_02_', 'Thursday_02_', 'Friday_02_',
            'Saturday_02_', 'Sunday_03_', 'Monday_03_', 'Tuesday_03_',
            'Wednesday_03_', 'Thursday_03_', 'Friday_03_', 'Saturday_03_'
        ]

        # Create a pool of processes
        num_processes = mp.cpu_count()
        pool = ProcessPool(nodes=num_processes)

        configs = []
        count = 0

        for day in days:
            self.config['RL_parameters']['experiment'] = self.expt_name + "_" + str(count)
            self.config['RL_parameters']['city_states_filename'] = day + 'city_states.dill'
            self.config['RL_parameters']['best_model_filename'] = day + 'model.dill'
            configs.append(deepcopy(self.config))
            count += 1

        self.logger.info("Starting expt_06")

        results = pool.amap(self.run_rl_training, configs).get()
        pool.close()
        pool.join()
        pool.clear()

        self.logger.info("Finished expt_06")

        # Export best episode
        self.data_exporter.export_episode(results, self.expt_name + ".dill")
Code example #9
def test_mp():
    # instantiate and configure the worker pool
    from pathos.pools import ProcessPool
    pool = ProcessPool(nodes=4)

    _result = list(map(pow, [1,2,3,4], [5,6,7,8])) 

    # do a blocking map on the chosen function
    result = pool.map(pow, [1,2,3,4], [5,6,7,8])
    assert result == _result

    # do a non-blocking map, then extract the result from the iterator
    result_iter = pool.imap(pow, [1,2,3,4], [5,6,7,8])
    result = list(result_iter)
    assert result == _result

    # do an asynchronous map, then get the results
    result_queue = pool.amap(pow, [1,2,3,4], [5,6,7,8])
    result = result_queue.get()
    assert result == _result
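
The handle returned by amap mirrors multiprocessing's AsyncResult, so besides get() it also offers wait() and ready(). A short sketch that polls instead of blocking:

import time
from pathos.pools import ProcessPool

if __name__ == '__main__':
    pool = ProcessPool(nodes=2)
    handle = pool.amap(pow, [2, 3], [10, 10])
    while not handle.ready():  # poll; wait() or get() would block instead
        time.sleep(0.01)
    assert handle.get() == [1024, 59049]
    pool.close()
    pool.join()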
Code example #10
def test_mp():
    # instantiate and configure the worker pool
    from pathos.pools import ProcessPool
    pool = ProcessPool(nodes=4)

    _result = list(map(pow, [1, 2, 3, 4], [5, 6, 7, 8]))

    # do a blocking map on the chosen function
    result = pool.map(pow, [1, 2, 3, 4], [5, 6, 7, 8])
    assert result == _result

    # do a non-blocking map, then extract the result from the iterator
    result_iter = pool.imap(pow, [1, 2, 3, 4], [5, 6, 7, 8])
    result = list(result_iter)
    assert result == _result

    # do an asynchronous map, then get the results
    result_queue = pool.amap(pow, [1, 2, 3, 4], [5, 6, 7, 8])
    result = result_queue.get()
    assert result == _result
Code example #11
File: test_stress.py  Project: kokizzu/RedisGraph
    def test03_clean_shutdown(self):
        # skip this test when running under COV=1
        # (valgrind does not work correctly with multiple processes)
        if os.getenv('COV') == '1':
            self.env.skip()

        # issue SHUTDOWN while traffic is generated
        indexes = range(self.client_count)
        pool = Pool(nodes=self.client_count)

        # invoke queries
        m = pool.amap(query_crud, graphs, indexes)

        # sleep briefly (0.2s) to let the worker processes kick in
        sleep(0.2)

        self.env.stop()

        # wait for processes to return
        m.wait()

        self.env.assertTrue(self.env.checkExitCode())
Code example #12
    def run(self):
        """
        Run experiment
        """
        ind_percent = np.arange(0., 1.1, 0.1)
        reb_percent = np.arange(0., 1.1, 0.1)
        ind_percent[0] = 0.01  # replace the 0% setting with a small non-zero value
        reb_percent[0] = 0.01
        combinations = list(itertools.product(ind_percent, reb_percent))
        num_episodes = self.config['RL_parameters']['num_episodes']

        # Create a pool of processes
        num_processes = mp.cpu_count()
        pool = ProcessPool(nodes=num_processes)

        configs = []
        count = 0
        for comb in combinations:
            self.config['RL_parameters']['experiment'] = self.expt_name + "_" + str(count)
            ind_episodes = int(comb[0] * num_episodes)
            reb_episodes = int(comb[1] * num_episodes)
            if (ind_episodes + reb_episodes) < num_episodes:
                self.config['RL_parameters']['ind_episodes'] = ind_episodes
                self.config['RL_parameters']['reb_episodes'] = reb_episodes
                configs.append(deepcopy(self.config))
                count += 1

        self.logger.info("Starting expt_01")

        results = pool.amap(self.run_rl_training, configs).get()
        pool.close()
        pool.join()
        pool.clear()

        self.logger.info("Finished expt_01")

        # Export best episode
        self.data_exporter.export_episode(results, self.expt_name + ".dill")
Code example #13
    def run(self):
        """
        Run experiment
        """
        num_drivers = np.arange(1000, 6500, 500)
        objectives = ['pickups', 'revenue']
        combinations = list(itertools.product(num_drivers, objectives))

        # Create a pool of processes
        num_processes = mp.cpu_count()
        pool = ProcessPool(nodes=num_processes)

        configs = []
        count = 0
        for comb in combinations:
            self.config['RL_parameters']['experiment'] = self.expt_name + "_" + str(count)
            self.config['RL_parameters']['city_states_filename'] = "city_states.dill"
            self.config['RL_parameters']['num_drivers'] = comb[0]
            self.config['RL_parameters']['num_strategic_drivers'] = comb[0]
            self.config['RL_parameters']['objective'] = comb[1]
            configs.append(deepcopy(self.config))
            count += 1

        self.logger.info("Starting expt_03")

        results = pool.amap(self.run_rl_training, configs).get()
        pool.close()
        pool.join()
        pool.clear()

        self.logger.info("Finished expt_03")

        # Export best episode
        self.data_exporter.export_episode(results, self.expt_name + ".dill")
Code example #14
File: test_mp.py  Project: daodaoliang/pathos
#!/usr/bin/env python
#
# Author: Mike McKerns (mmckerns @caltech and @uqfoundation)
# Copyright (c) 1997-2015 California Institute of Technology.
# License: 3-clause BSD.  The full license text is available at:
#  - http://trac.mystic.cacr.caltech.edu/project/pathos/browser/pathos/LICENSE

from pathos.helpers import freeze_support
freeze_support()

# instantiate and configure the worker pool
from pathos.pools import ProcessPool
pool = ProcessPool(nodes=4)

_result = list(map(pow, [1,2,3,4], [5,6,7,8]))  # list() so the asserts below hold under Python 3

# do a blocking map on the chosen function
result = pool.map(pow, [1,2,3,4], [5,6,7,8])
assert result == _result

# do a non-blocking map, then extract the result from the iterator
result_iter = pool.imap(pow, [1,2,3,4], [5,6,7,8])
result = list(result_iter)
assert result == _result

# do an asynchronous map, then get the results
result_queue = pool.amap(pow, [1,2,3,4], [5,6,7,8])
result = result_queue.get()
assert result == _result
Code example #15
    def test_05_concurrent_read_delete(self):
        redis_con = self.env.getConnection()

        ##############################################################################################
        # Delete graph via Redis DEL key.
        ##############################################################################################
        self.populate_graph()
        pool = Pool(nodes=CLIENT_COUNT)

        q = """UNWIND (range(0, 10000)) AS x WITH x AS x WHERE (x / 900) = 1 RETURN x"""
        queries = [q] * CLIENT_COUNT
        # invoke queries
        m = pool.amap(thread_run_query, graphs, queries)

        redis_con.delete(GRAPH_ID)

        # wait for processes to return
        m.wait()

        # get the results
        result = m.get()

        # validate result.
        self.env.assertTrue(all([r.result_set[0][0] == 900 for r in result]))

        # Make sure the graph is empty, i.e. the graph was deleted.
        resultset = graphs[0].query("MATCH (n) RETURN count(n)").result_set
        self.env.assertEquals(resultset[0][0], 0)

        ##############################################################################################
        # Delete graph via Redis FLUSHALL.
        ##############################################################################################
        self.populate_graph()
        q = """UNWIND (range(0, 10000)) AS x WITH x AS x WHERE (x / 900) = 1 RETURN x"""
        queries = [q] * CLIENT_COUNT
        # invoke queries
        m = pool.amap(thread_run_query, graphs, queries)

        redis_con.flushall()

        # wait for processes to return
        m.wait()

        # get the results
        result = m.get()

        # validate result.
        self.env.assertTrue(all([r.result_set[0][0] == 900 for r in result]))

        # Make sure the graph is empty, i.e. the graph was deleted.
        resultset = graphs[0].query("MATCH (n) RETURN count(n)").result_set
        self.env.assertEquals(resultset[0][0], 0)

        ##############################################################################################
        # Delete graph via GRAPH.DELETE.
        ##############################################################################################
        self.populate_graph()
        q = """UNWIND (range(0, 10000)) AS x WITH x AS x WHERE (x / 900) = 1 RETURN x"""
        queries = [q] * CLIENT_COUNT
        # invoke queries
        m = pool.amap(thread_run_query, graphs, queries)

        graphs[-1].delete()

        # wait for processes to return
        m.wait()

        # get the results
        result = m.get()

        # validate result.
        self.env.assertTrue(all([r.result_set[0][0] == 900 for r in result]))

        # Make sure the graph is empty, i.e. the graph was deleted.
        resultset = graphs[0].query("MATCH (n) RETURN count(n)").result_set
        self.env.assertEquals(resultset[0][0], 0)
Code example #16
File: multi_process.py  Project: jchamilton75/MySoft
def parallel_for(func, args, nprocs=8):
    # TIMEOUT is a module-level constant in multi_process.py
    pool = Pool(nprocs)
    return pool.amap(func, args).get(TIMEOUT)
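
A hypothetical usage sketch, assuming Pool is pathos.pools.ProcessPool and TIMEOUT is a value in seconds:

from pathos.pools import ProcessPool as Pool

TIMEOUT = 60  # stand-in for the constant defined in multi_process.py

def parallel_for(func, args, nprocs=8):
    pool = Pool(nprocs)
    return pool.amap(func, args).get(TIMEOUT)

if __name__ == '__main__':
    assert parallel_for(abs, [-1, -2, 3]) == [1, 2, 3]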
Code example #17
    def test_05_concurrent_read_delete(self):
        ##############################################################################################
        # Delete graph via Redis DEL key.
        ##############################################################################################
        self.populate_graph()
        pool = Pool(nodes=CLIENT_COUNT)
        manager = pathos_multiprocess.Manager()
        # shared barrier so all clients issue their queries at the same time
        barrier = manager.Barrier(CLIENT_COUNT)
        barriers = [barrier] * CLIENT_COUNT

        q = """UNWIND (range(0, 10000)) AS x WITH x AS x WHERE (x / 900) = 1 RETURN x"""
        queries = [q] * CLIENT_COUNT
        # invoke queries
        m = pool.amap(thread_run_query, queries, barriers)

        self.conn.delete(GRAPH_ID)

        # wait for processes to return
        m.wait()

        # get the results
        results = m.get()

        # validate result.
        self.env.assertTrue(
            all([r["result_set"][0][0] == 900 for r in results]))

        # Make sure the graph is empty, i.e. the graph was deleted.
        resultset = self.graph.query("MATCH (n) RETURN count(n)").result_set
        self.env.assertEquals(resultset[0][0], 0)
        ##############################################################################################
        # Delete graph via Redis FLUSHALL.
        ##############################################################################################
        self.populate_graph()
        q = """UNWIND (range(0, 10000)) AS x WITH x AS x WHERE (x / 900) = 1 RETURN x"""
        queries = [q] * CLIENT_COUNT
        barrier = manager.Barrier(CLIENT_COUNT)
        barriers = [barrier] * CLIENT_COUNT
        # invoke queries
        m = pool.amap(thread_run_query, queries, barriers)

        self.conn.flushall()

        # wait for processes to return
        m.wait()

        # get the results
        results = m.get()

        # validate result.
        self.env.assertTrue(
            all([r["result_set"][0][0] == 900 for r in results]))

        # Make sure the graph is empty, i.e. the graph was deleted.
        resultset = self.graph.query("MATCH (n) RETURN count(n)").result_set
        self.env.assertEquals(resultset[0][0], 0)
        ##############################################################################################
        # Delete graph via GRAPH.DELETE.
        ##############################################################################################
        self.populate_graph()
        q = """UNWIND (range(0, 10000)) AS x WITH x AS x WHERE (x / 900) = 1 RETURN x"""
        queries = [q] * CLIENT_COUNT
        barrier = manager.Barrier(CLIENT_COUNT)
        barriers = [barrier] * CLIENT_COUNT
        # invoke queries
        m = pool.amap(thread_run_query, queries, barriers)

        self.graph.delete()

        # wait for processes to return
        m.wait()

        # get the results
        results = m.get()

        # validate result.
        self.env.assertTrue(
            all([r["result_set"][0][0] == 900 for r in results]))

        # Make sure the graph is empty, i.e. the graph was deleted.
        resultset = self.graph.query("MATCH (n) RETURN count(n)").result_set
        self.env.assertEquals(resultset[0][0], 0)
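
The Manager-backed Barrier above is what lets every client fire its query at the same moment. A minimal sketch of the same pattern, assuming the multiprocess package that ships with pathos (its Manager mirrors the standard library's):

from multiprocess import Manager  # pathos's fork of multiprocessing
from pathos.pools import ProcessPool

def worker(i, barrier):
    barrier.wait()  # block until all workers have arrived
    return i * i

if __name__ == '__main__':
    n = 4
    pool = ProcessPool(nodes=n)
    barrier = Manager().Barrier(n)
    results = pool.amap(worker, range(n), [barrier] * n).get()
    assert results == [0, 1, 4, 9]
    pool.close()
    pool.join()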
Code example #18
    def run(self):
        """
        Run experiment
        """
        days = [
            'Sunday', 'Monday', 'Tuesday', 'Wednesday', 'Thursday', 'Friday',
            'Saturday'
        ]
        weeks_of_month = ['00', '01', '02', '03', '04']
        imbalance_thresholds = [2]
        model_num_drivers = [4000, 5000, 6000, 7000, 8000, 9000, 10000]

        test_combinations = []
        for model_day in days:
            for model_wom in weeks_of_month:
                for model_threshold in imbalance_thresholds:
                    for model_drivers in model_num_drivers:
                        model_args = [
                            model_day, model_wom,
                            str(model_drivers),
                            str(model_threshold)
                        ]
                        model_filename = "_".join(model_args) + "_model.dill"
                        if os.path.isfile(self.config['app']['DATA_DIR'] +
                                          'models/' + model_filename):
                            for test_wom in weeks_of_month:
                                for test_drivers in range(
                                        model_drivers - 3000,
                                        model_drivers + 4000, 1000):
                                    test_file = model_day + '_' + test_wom + '_city_states.dill'
                                    if os.path.isfile(
                                            self.config['app']['DATA_DIR'] +
                                            'city_states/' + test_file):
                                        test_combinations.append({
                                            'model': model_filename,
                                            'test_dow': model_day,
                                            'test_wom': test_wom,
                                            'test_drivers': test_drivers
                                        })
        self.logger.info("Total test combinations: {}".format(
            len(test_combinations)))

        # Create a pool of processes
        num_processes = mp.cpu_count()
        pool = ProcessPool(nodes=num_processes)

        configs = []
        count = 0

        for comb in test_combinations:
            self.config['Model_testing']['experiment'] = self.expt_name + "_" + str(count)
            self.config['Model_testing']['city_states_filename'] = (
                comb['test_dow'] + '_' + comb['test_wom'] + '_city_states.dill')
            self.config['Model_testing']['model_filename'] = comb['model']
            self.config['RL_parameters']['num_drivers'] = comb['test_drivers']
            self.config['RL_parameters']['num_strategic_drivers'] = comb['test_drivers']

            configs.append(deepcopy(self.config))
            count += 1

        self.logger.info("Starting expt_08")

        results = pool.amap(self.run_rl_testing, configs).get()
        pool.close()
        pool.join()
        pool.clear()

        self.logger.info("Finished expt_08")

        # Export best episode
        self.data_exporter.export_episode(results, self.expt_name + ".dill")
Code example #19
#!/usr/bin/env python
#
# Author: Mike McKerns (mmckerns @caltech and @uqfoundation)
# Copyright (c) 1997-2015 California Institute of Technology.
# License: 3-clause BSD.  The full license text is available at:
#  - http://trac.mystic.cacr.caltech.edu/project/pathos/browser/pathos/LICENSE

from pathos.helpers import freeze_support
freeze_support()

# instantiate and configure the worker pool
from pathos.pools import ProcessPool
pool = ProcessPool(nodes=4)

_result = list(map(pow, [1, 2, 3, 4], [5, 6, 7, 8]))  # list() so the asserts below hold under Python 3

# do a blocking map on the chosen function
result = pool.map(pow, [1, 2, 3, 4], [5, 6, 7, 8])
assert result == _result

# do a non-blocking map, then extract the result from the iterator
result_iter = pool.imap(pow, [1, 2, 3, 4], [5, 6, 7, 8])
result = list(result_iter)
assert result == _result

# do an asynchronous map, then get the results
result_queue = pool.amap(pow, [1, 2, 3, 4], [5, 6, 7, 8])
result = result_queue.get()
assert result == _result