def run(self):
        """
        Run experiment
        """
        num_drivers = np.arange(1000, 6500, 500)
        # Create a pool of processes
        num_processes = mp.cpu_count()
        self.logger.info("Processes: {}".format(num_processes))
        pool = ProcessPool(nodes=num_processes)

        configs = []
        count = 0
        for drivers in num_drivers:
            self.config['RL_parameters'][
                'experiment'] = self.expt_name + "_" + str(count)
            self.config['RL_parameters'][
                'city_states_filename'] = "city_states.dill"
            self.config['RL_parameters']['num_drivers'] = drivers
            self.config['RL_parameters']['num_strategic_drivers'] = drivers
            configs.append(deepcopy(self.config))
            count += 1

        self.logger.info("Starting expt_02")

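        # amap is pathos' asynchronous map: the call returns immediately and
        # .get() blocks until every config has been processed.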
        results = pool.amap(self.run_rl_training, configs).get()
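        # Release the pool: close() stops new submissions, join() waits for the
        # workers to finish, and clear() drops the pool state that pathos caches.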
        pool.close()
        pool.join()
        pool.clear()

        self.logger.info("Finished expt_02")

        # Export best episode
        self.data_exporter.export_episode(results, self.expt_name + ".dill")
Example #2
    def test_calc_uncertainty_pool_pass(self):
        """Test parallel compute the uncertainty distribution for an impact"""

        exp_unc, impf_unc, _ = make_input_vars()
        haz = haz_dem()
        unc_calc = CalcImpact(exp_unc, impf_unc, haz)
        unc_data = unc_calc.make_sample(N=2)

        pool = Pool(nodes=2)
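        # Pool is presumably pathos' ProcessPool imported under an alias; the
        # import is not shown in this excerpt.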
        try:
            unc_data = unc_calc.uncertainty(unc_data,
                                            calc_eai_exp=False,
                                            calc_at_event=False,
                                            pool=pool)
        finally:
            pool.close()
            pool.join()
            pool.clear()

        self.assertEqual(unc_data.unit, exp_dem().value_unit)
        self.assertListEqual(unc_calc.rp, [5, 10, 20, 50, 100, 250])
        self.assertEqual(unc_calc.calc_eai_exp, False)
        self.assertEqual(unc_calc.calc_at_event, False)

        self.assertEqual(unc_data.aai_agg_unc_df.size, unc_data.n_samples)
        self.assertEqual(unc_data.tot_value_unc_df.size, unc_data.n_samples)

        self.assertEqual(unc_data.freq_curve_unc_df.size,
                         unc_data.n_samples * len(unc_calc.rp))
        self.assertTrue(unc_data.eai_exp_unc_df.empty)
        self.assertTrue(unc_data.at_event_unc_df.empty)
Example #3
    def test_calc_uncertainty_pool_pass(self):
        """Test compute the uncertainty distribution for an impact"""

        ent_iv, _ = make_costben_iv()
        _, _, haz_iv = make_input_vars()
        unc_calc = CalcCostBenefit(haz_iv, ent_iv)
        unc_data = unc_calc.make_sample(N=2)

        pool = Pool(nodes=2)
        try:
            unc_data = unc_calc.uncertainty(unc_data, pool=pool)
        finally:
            pool.close()
            pool.join()
            pool.clear()

        self.assertEqual(unc_data.unit, ent_dem().exposures.value_unit)

        self.assertEqual(unc_data.tot_climate_risk_unc_df.size,
                         unc_data.n_samples)
        self.assertEqual(
            unc_data.cost_ben_ratio_unc_df.size,
            unc_data.n_samples * 4  # number of measures
        )
        self.assertEqual(unc_data.imp_meas_present_unc_df.size, 0)
        self.assertEqual(
            unc_data.imp_meas_future_unc_df.size,
            unc_data.n_samples * 4 * 5  # 4 measures * 5 risk/benefit metrics
        )
Example #4
    def run(self):
        """
        Run experiment
        """
        num_drivers = np.arange(1000, 6500, 500)
        thresholds = np.arange(5, 55, 5)
        thresholds = np.insert(thresholds, 0, 2)
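        # Build one experiment config per (num_drivers, threshold) pair.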
        combinations = list(itertools.product(num_drivers, thresholds))

        # Create a pool of processes
        num_processes = mp.cpu_count()
        self.logger.info("Processes: {}".format(num_processes))
        pool = ProcessPool(nodes=num_processes)

        configs = []
        count = 0
        for comb in combinations:
            self.config['RL_parameters'][
                'experiment'] = self.expt_name + "_" + str(count)
            self.config['RL_parameters']['num_drivers'] = comb[0]
            self.config['RL_parameters']['imbalance_threshold'] = comb[1]
            configs.append(deepcopy(self.config))
            count += 1

        self.logger.info("Starting expt_04")

        results = pool.amap(self.run_rl_training, configs).get()
        pool.close()
        pool.join()
        pool.clear()

        self.logger.info("Finished expt_04")

        # Export best episode
        self.data_exporter.export_episode(results, self.expt_name + ".dill")
Example #5
    def run(self):
        self.logger.info("Starting baselines")
        city_states = self.data_provider.read_city_states()
        baseline_list = self.config['baselines']['baseline_list']

        # Create a pool of processes
        num_processes = mp.cpu_count()
        self.logger.info("Processes: {}".format(num_processes))
        pool = ProcessPool(nodes=num_processes)

        configs = []
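        # Ten repetitions of every baseline, one config dict per run.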
        for count in range(10):
            for name in baseline_list:
                configs.append({
                    'name': name,
                    'count': count,
                    'config': self.config,
                    'city_states': city_states
                })

        results = pool.amap(self.run_baseline, configs).get()
        pool.close()
        pool.join()
        pool.clear()

        episode_rewards = []
        for result in results:
            episode_rewards += result

        self.data_exporter.export_baseline_data(episode_rewards)
        self.logger.info("Finished baselines")
    def run(self):
        """
        Run experiment
        """
        days = [
            'Sunday_00_', 'Monday_00_', 'Tuesday_00_', 'Wednesday_00_',
            'Thursday_00_', 'Friday_00_', 'Saturday_00_', 'Sunday_01_',
            'Monday_01_', 'Tuesday_01_', 'Wednesday_01_', 'Thursday_01_',
            'Friday_01_', 'Saturday_01_', 'Sunday_02_', 'Monday_02_',
            'Tuesday_02_', 'Wednesday_02_', 'Thursday_02_', 'Friday_02_',
            'Saturday_02_', 'Sunday_03_', 'Monday_03_', 'Tuesday_03_',
            'Wednesday_03_', 'Thursday_03_', 'Friday_03_', 'Saturday_03_',
            'Sunday_04_', 'Monday_04_', 'Tuesday_04_', 'Wednesday_04_',
            'Thursday_04_', 'Friday_04_', 'Saturday_04_'
        ]

        num_drivers = [4000, 5000, 6000, 7000, 8000, 9000, 10000]

        imbalance_thresholds = [2]

        # Create a pool of processes
        num_processes = mp.cpu_count()
        self.logger.info("Processes: {}".format(num_processes))
        pool = ProcessPool(nodes=num_processes)

        configs = []
        count = 0

        for d in num_drivers:
            for threshold in imbalance_thresholds:
                for day in days:
                    self.config['RL_parameters']['num_drivers'] = d
                    self.config['RL_parameters']['num_strategic_drivers'] = d

                    self.config['RL_parameters'][
                        'imbalance_threshold'] = threshold
                    self.config['RL_parameters'][
                        'experiment'] = self.expt_name + "_" + str(count)
                    if os.path.isfile(self.config['app']['DATA_DIR'] +
                                      'city_states/' + day +
                                      'city_states.dill'):
                        self.config['RL_parameters'][
                            'city_states_filename'] = day + 'city_states.dill'
                        self.config['RL_parameters']['best_model_filename'] = (
                            day + str(d) + '_' + str(threshold) +
                            '_model.dill')
                        configs.append(deepcopy(self.config))
                        count += 1

        self.logger.info("Starting expt_07")

        results = pool.amap(self.run_rl_training, configs).get()
        pool.close()
        pool.join()
        pool.clear()

        self.logger.info("Finished expt_07")
Example #7
def opt_ind_params(perf_f, res, games, gids, role, pid, init_params, bounds):
    def min_f(params_test):
        return -perf_f(params_test, res, games, gids, role, pid)

    pool = ProcessPool(nodes=mp.cpu_count())
    try:
        opt = scp.optimize.differential_evolution(min_f, bounds,
                                                  workers=pool.map)
    finally:
        # Release the pool even if the optimization raises.
        pool.close()
        pool.join()
        pool.clear()
    return opt
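# Note: scipy's differential_evolution accepts any map-like callable for its
# "workers" argument, so pool.map can be passed in directly.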
Example #8
def multi_process(data_path, time_list):
    for time in time_list[:]:
        # print(time)
        base_path = arrow.get(time['ini']).format('YYYYMMDDHH')
        # -- forecast data processing
        gefs_fcst = GEFSFcst(data_path['gefs_fcst'], time, base_path)
        p = ProcessPool(7)
        for n in range(21):
            # gefs_fcst.download(n)
            p.apipe(download, gefs_fcst, n)
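        # apipe submits each download asynchronously; join() below waits for
        # all of them to finish.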
        p.close()
        p.join()
        p.clear()
Example #9
    def get_city_states(self):
        """
        Creates city states from start time to end time
        :return: dict of city states
        """
        start_time = self.start_time
        end_time = self.end_time

        # Create array of time slice values between the start and end time
        business_days = self.config['city_state_creator']['business_days']
        business_hours_start = self.config['city_state_creator'][
            'business_hours_start']
        business_hours_end = self.config['city_state_creator'][
            'business_hours_end']
        index = pd.date_range(start=start_time,
                              end=end_time,
                              freq=str(self.time_unit_duration) + 'min')

        # Filter only the required days and hours
        index = index[index.day_name().isin(business_days)]
        index = index[(index.hour >= business_hours_start)
                      & (index.hour <= business_hours_end)]
        time_slice_starts = index - timedelta(
            minutes=self.time_slice_duration / 2)
        time_slice_ends = index + timedelta(
            minutes=self.time_slice_duration / 2)

        # Create arguments dictionary for parallelization
        self.parallel_args = self.create_parallel_args(index,
                                                       time_slice_starts,
                                                       time_slice_ends)

        # Create city states
        manager = Manager()
        city_states = manager.dict()
        N = len(index.values)

        # Create parallel pool
        self.logger.info("Creating parallelization pool")
        pool = ProcessPool(nodes=25)
        pool.map(self.get_city_state, ([city_states, t] for t in range(N)))
        pool.close()
        pool.join()
        pool.clear()
        self.logger.info("Finished creating city states")

        return dict(city_states)
Example #10
    def test_est_comp_time_pass(self):

        exp_unc, _, haz_unc = make_imp_uncs()

        unc = Uncertainty({'exp': exp_unc, 'haz': haz_unc})

        unc.make_sample(N=1, sampling_kwargs={'calc_second_order': False})
        est = unc.est_comp_time(0.12345)
        self.assertEqual(est, 1 * (2 + 2) * 0.123)  # N * (D + 2)

        pool = Pool(nodes=4)
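        # With a pool, est_comp_time divides the estimate by the number of nodes.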
        est = unc.est_comp_time(0.12345, pool)
        self.assertEqual(est, 1 * (2 + 2) * 0.123 / 4)  # N * (D + 2)
        pool.close()
        pool.join()
        pool.clear()
Example #11
def test_pathos_pp_callable () :
    """Test parallel processnig with pathos: ParallelPool  
    """
    logger = getLogger("ostap.test_pathos_pp_callable")         
    if not pathos :
        logger.error ( "pathos is not available" )
        return
    
    logger.info ('Test job submission with %s' %  pathos ) 
    
    if DILL_PY3_issue : 
        logger.warning ("test is disabled (DILL/ROOT/PY3 issue)" )
        return

    ## logger.warning ("test is disabled for UNKNOWN REASON")
    ## return

    from pathos.helpers import cpu_count
    ncpus = cpu_count  ()
    
    from pathos.pools import ParallelPool as Pool 

    pool = Pool ( ncpus )   
    logger.info ( "Pool is %s" %  ( type ( pool ).__name__ ) )

    pool.restart ( True ) 


    mh   = MakeHisto() 
    jobs = pool.uimap ( mh.process ,  [  ( i , n )  for  ( i , n ) in enumerate ( inputs ) ] )
    
    result = None 
    for h in progress_bar ( jobs , max_value = len ( inputs ) ) :
        if not result  : result = h
        else           : result.Add ( h )

    pool.close ()
    pool.join  ()
    pool.clear ()
    
    logger.info ( "Histogram is %s" % result.dump ( 80 , 10 )  )
    logger.info ( "Entries  %s/%s" % ( result.GetEntries() , sum ( inputs ) ) ) 
    
    with wait ( 1 ) , use_canvas ( 'test_pathos_pp_callable' ) : 
        result.draw (   ) 

    return result 
Example #12
def opt_pop_params(perf_f, res, games, gids, init_params, bounds):
    def min_f(params):
        n_p1 = len(res[gids[0]]["params"][0])
        n_p2 = len(res[gids[0]]["params"][1])
        ll = 0
        for pid in range(n_p1):
            ll += perf_f(params, res, games, gids, 0, pid)
        for pid in range(n_p2):
            ll += perf_f(params, res, games, gids, 1, pid)
        return -ll

    pool = ProcessPool()
    opt = scp.optimize.differential_evolution(min_f, bounds, workers=pool.imap)
    pool.close()
    pool.join()
    pool.clear()
    # opt = scp.optimize.differential_evolution(min_f, bounds)
    return opt
Example #13
    def run(self):
        """
        Run experiment
        """
        days = [
            'Sunday_00_', 'Monday_00_', 'Tuesday_00_', 'Wednesday_00_',
            'Thursday_00_', 'Friday_00_', 'Saturday_00_', 'Sunday_01_',
            'Monday_01_', 'Tuesday_01_', 'Wednesday_01_', 'Thursday_01_',
            'Friday_01_', 'Saturday_01_', 'Sunday_02_', 'Monday_02_',
            'Tuesday_02_', 'Wednesday_02_', 'Thursday_02_', 'Friday_02_',
            'Saturday_02_', 'Sunday_03_', 'Monday_03_', 'Tuesday_03_',
            'Wednesday_03_', 'Thursday_03_', 'Friday_03_', 'Saturday_03_'
        ]

        # Create a pool of processes
        num_processes = mp.cpu_count()
        pool = ProcessPool(nodes=num_processes)

        configs = []
        count = 0

        for day in days:
            self.config['RL_parameters'][
                'experiment'] = self.expt_name + "_" + str(count)
            self.config['RL_parameters'][
                'city_states_filename'] = day + 'city_states.dill'
            self.config['RL_parameters'][
                'best_model_filename'] = day + 'model.dill'
            configs.append(deepcopy(self.config))
            count += 1

        self.logger.info("Starting expt_06")

        results = pool.amap(self.run_rl_training, configs).get()
        pool.close()
        pool.join()
        pool.clear()

        self.logger.info("Finished expt_06")

        # Export best episode
        self.data_exporter.export_episode(results, self.expt_name + ".dill")
Example #14
    def run(self):
        """
        Run experiment
        """
        num_drivers = np.arange(1000, 6500, 500)
        objectives = ['pickups', 'revenue']
        combinations = list(itertools.product(num_drivers, objectives))

        # Create a pool of processes
        num_processes = mp.cpu_count()
        pool = ProcessPool(nodes=num_processes)

        configs = []
        count = 0
        for comb in combinations:
            self.config['RL_parameters'][
                'experiment'] = self.expt_name + "_" + str(count)
            self.config['RL_parameters'][
                'city_states_filename'] = "city_states.dill"
            self.config['RL_parameters']['num_drivers'] = comb[0]
            self.config['RL_parameters']['num_strategic_drivers'] = comb[0]
            self.config['RL_parameters']['objective'] = comb[1]
            configs.append(deepcopy(self.config))
            count += 1

        self.logger.info("Starting expt_03")

        results = pool.amap(self.run_rl_training, configs).get()
        pool.close()
        pool.join()
        pool.clear()

        self.logger.info("Finished expt_03")

        # Export best episode
        self.data_exporter.export_episode(results, self.expt_name + ".dill")
Example #15
            results = tuple(-s.bestEnergy for s in solver)  #NOTE: -1 for LUB
            #print('[id: %s] %s' % (i, tuple(s.bestSolution for s in solver)))
        else:
            results = -solver[axis].bestEnergy  #NOTE: -1 for LUB
            #print('[id: %s] %s' % (i, solver[axis].bestSolution))
        return results

    # outer-loop solver configuration and execution
    settings = dict(args=(axis, Ns),
                    bounds=bounds,
                    maxiter=1000,
                    maxfun=100000,
                    disp=1,
                    full_output=1,
                    itermon=stepmon,
                    ftol=1e-6,
                    npop=4,
                    gtol=4,
                    solver=PowellDirectionalSolver)  # xtol=1e-6)
    pool = Pool(settings['npop'])
    _map = pool.map
    result = _solver(cost, x0, map=_map, **settings)
    pool.close()
    pool.join()
    pool.clear()

    # get the best result (generally the same as returned by solver)
    m = stepmon.min()
    print("%s @ %s" % (m.y, m.x))  #NOTE: -1 for max, 1 for min
    #print("%s @ %s" % (result[1], result[0])) #NOTE: -1 for max, 1 for min
    def run(self):
        """
        Run experiment
        """
        days = [
            'Sunday', 'Monday', 'Tuesday', 'Wednesday', 'Thursday', 'Friday',
            'Saturday'
        ]
        weeks_of_month = ['00', '01', '02', '03', '04']
        imbalance_thresholds = [2]
        model_num_drivers = [4000, 5000, 6000, 7000, 8000, 9000, 10000]

        test_combinations = []
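        # Pair every trained model with each test week and with driver counts
        # within +/-3000 of the model's training value, in steps of 1000.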
        for model_day in days:
            for model_wom in weeks_of_month:
                for model_threshold in imbalance_thresholds:
                    for model_drivers in model_num_drivers:
                        model_args = [
                            model_day, model_wom,
                            str(model_drivers),
                            str(model_threshold)
                        ]
                        model_filename = "_".join(model_args) + "_model.dill"
                        if os.path.isfile(self.config['app']['DATA_DIR'] +
                                          'models/' + model_filename):
                            for test_wom in weeks_of_month:
                                for test_drivers in range(
                                        model_drivers - 3000,
                                        model_drivers + 4000, 1000):
                                    test_file = model_day + '_' + test_wom + '_city_states.dill'
                                    if os.path.isfile(
                                            self.config['app']['DATA_DIR'] +
                                            'city_states/' + test_file):
                                        test_combinations.append({
                                            'model': model_filename,
                                            'test_dow': model_day,
                                            'test_wom': test_wom,
                                            'test_drivers': test_drivers
                                        })
        self.logger.info("Total test combinations: {}".format(
            len(test_combinations)))

        # Create a pool of processes
        num_processes = mp.cpu_count()
        pool = ProcessPool(nodes=num_processes)

        configs = []
        count = 0

        for comb in test_combinations:
            self.config['Model_testing'][
                'experiment'] = self.expt_name + "_" + str(count)
            self.config['Model_testing']['city_states_filename'] = (
                comb['test_dow'] + '_' + comb['test_wom'] +
                '_city_states.dill')
            self.config['Model_testing']['model_filename'] = comb['model']
            self.config['RL_parameters']['num_drivers'] = comb['test_drivers']
            self.config['RL_parameters']['num_strategic_drivers'] = comb[
                'test_drivers']

            configs.append(deepcopy(self.config))
            count += 1

        self.logger.info("Starting expt_08")

        results = pool.amap(self.run_rl_testing, configs).get()
        pool.close()
        pool.join()
        pool.clear()

        self.logger.info("Finished expt_08")

        # Export best episode
        self.data_exporter.export_episode(results, self.expt_name + ".dill")
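
All of the examples above share the same pathos pool lifecycle. Below is a
minimal, self-contained sketch of that pattern; the worker function and inputs
are hypothetical stand-ins, not taken from any example above.

import multiprocessing as mp
from pathos.pools import ProcessPool

def square(x):  # stand-in for run_rl_training, download, etc.
    return x * x

if __name__ == '__main__':
    pool = ProcessPool(nodes=mp.cpu_count())
    try:
        # amap returns an async handle; get() blocks until all results arrive
        results = pool.amap(square, range(10)).get()
    finally:
        pool.close()  # stop accepting new tasks
        pool.join()   # wait for the worker processes to exit
        pool.clear()  # drop the cached pool so a fresh one can be created later
    print(results)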