def test_get_horizon():
    env = examples.double_reentrant_line_model()
    discount_factor = 0.9999
    overrides = {
        'BigStepPenaltyPolicyParams': {
            'boolean_action_flag': True,  # MIP policy requires boolean actions.
            'convex_solver': 'cvx.CPLEX'
        },
        'HedgehogHyperParams': {
            'activity_rates_policy_class_name': 'BigStepPolicy',
            'horizon_mpc_ratio': 0,
            'minimum_horizon': 1
        }
    }
    ac_params, wk_params, si_params, po_params, hh_params, _, dp_params, _ \
        = get_hedgehog_hyperparams(**overrides)
    pf_mip_agent = PureFeedbackMIPHedgehogAgent(
        env,
        discount_factor,
        wk_params,
        hh_params,
        ac_params,
        si_params,
        po_params,
        demand_planning_params=dp_params)
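    # With horizon_mpc_ratio=0, get_horizon() falls back to minimum_horizon.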
    assert pf_mip_agent.get_horizon() == 1

    def test_compare_maxweight_vs_maxweight_scheduling_double_reentrant_line_model(
            self):
        seed = 42
        np.random.seed(seed)
        initial_state = 50 * np.ones((5, 1))
        env = examples.double_reentrant_line_model(alpha=1,
                                                   mu1=4,
                                                   mu2=3,
                                                   mu3=2,
                                                   mu4=3,
                                                   mu5=4,
                                                   cost_per_buffer=np.ones(
                                                       (5, 1)),
                                                   initial_state=initial_state,
                                                   capacity=np.ones(
                                                       (5, 1)) * np.inf,
                                                   job_conservation_flag=True,
                                                   job_gen_seed=seed,
                                                   max_episode_length=None)
        smw_agent = smw.SchedulingMaxWeightAgent(env)
        mw_agent = mw.MaxWeightAgent(env)

        states = np.random.randint(50, size=(400, 5))
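        # Both policies should prescribe equivalent actions for each state.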
        for s in states:
            z_star_mw = mw_agent.max_weight_policy(s[:, None])
            z_star_smw = smw_agent.scheduling_max_weight_policy(s[:, None])
            self.assert_both_mw_policies_are_equivalent(
                s, env.cost_per_buffer,
                env.job_generator.buffer_processing_matrix, z_star_mw,
                z_star_smw)
    def test_compare_maxweight_vs_maxweight_scheduling(self):
        seed = 42
        np.random.seed(seed)
        initial_state = 50 * np.ones((5, 1))
        env = examples.double_reentrant_line_model(initial_state=initial_state,
                                                   job_gen_seed=seed)
        sch_mw_agent = sch_mw.SchedulingMaxWeightAgent(env)
        mw_agent = mw_lp.MaxWeightLpAgent(env)

        states = np.random.randint(50, size=(400, 5))
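        # Both agents should prescribe equivalent actions for each state.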
        for s in states:
            z_star_mw, _ = mw_agent.max_weight_policy(s[:, None])
            z_star_sch_mw = sch_mw_agent.scheduling_max_weight_policy(
                s[:, None])
            self.assert_both_mw_policies_are_equivalent(
                s, env.cost_per_buffer,
                env.job_generator.buffer_processing_matrix, z_star_mw,
                z_star_sch_mw)

def test_get_horizon():
    env = examples.double_reentrant_line_model()
    discount_factor = 0.9999
    overrides = {
        'HedgehogHyperParams': {
            'horizon_mpc_ratio': 0,
            'minimum_horizon': 1
        }
    }
    ac_params, wk_params, si_params, _, hh_params, _, _, dp_params \
        = get_hedgehog_hyperparams(**overrides)
    pf_stationary_agent = PureFeedbackStationaryHedgehogAgent(
        env,
        discount_factor,
        wk_params,
        hh_params,
        ac_params,
        si_params,
        demand_planning_params=dp_params)
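    # The horizon again falls back to minimum_horizon when the ratio is 0.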
    assert pf_stationary_agent.get_horizon() == 1

    def test_integration_double_reentrant_line_model(self):
        seed = 42
        np.random.seed(seed)
        initial_state = 50 * np.ones((5, 1))
        env = examples.double_reentrant_line_model(
            alpha=1,
            mu1=4,
            mu2=3,
            mu3=2,
            mu4=3,
            mu5=4,
            cost_per_buffer=np.array([1, 1, 1, 1, 1])[:, None],
            initial_state=initial_state,
            capacity=np.ones((5, 1)) * np.inf,
            job_conservation_flag=True,
            job_gen_seed=seed,
            max_episode_length=None)
        agent = mw.MaxWeightAgent(env)
        simulator = ps.SncSimulator(env, agent, discount_factor=0.95)
        data_mw = simulator.run(num_simulation_steps=1000)
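        # Every buffer should end below its initial level of 50.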
        assert np.all(data_mw['state'][-1] < initial_state)
    def test_set_action_draining_empty_buffer_to_zero_double_reentrant_line_model(
            self):
        """
        For this example, the LP assigns a nonzero rate to the action that
        drains buffer 5, even though that buffer is empty, so the helper
        must zero that activity out.
        """
        env = examples.double_reentrant_line_model(
            alpha=0.25,
            mu1=1,
            mu2=0.75,
            mu3=0.5,
            mu4=0.75,
            mu5=1,
            initial_state=5 * np.ones((5, 1)))
        opt_val = -21.25
        z_star = np.array([[1], [0], [1], [1], [1]])
        state = np.array([[54], [54], [29], [27], [0]])
        diag_cost = np.eye(5)
        z_star_zeroed = mw_lp.set_action_draining_empty_buffer_to_zero(
            opt_val, z_star, state, env.job_generator.buffer_processing_matrix,
            diag_cost)
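        # Only the rate for the activity draining empty buffer 5 is zeroed.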
        assert np.all(z_star_zeroed[:-1] == z_star[:-1])
        assert z_star_zeroed[4] == 0

def test_create_agent_with_mip_flag_set_to_false():
    env = examples.double_reentrant_line_model()
    discount_factor = 0.9999
    overrides = {
        'BigStepPenaltyPolicyParams': {
            'boolean_action_flag': False,  # Invalid for the MIP agent; should raise.
            'convex_solver': 'cvx.CPLEX'
        },
        'HedgehogHyperParams': {
            'activity_rates_policy_class_name': 'BigStepPolicy'
        }
    }
    ac_params, wk_params, si_params, po_params, hh_params, _, dp_params, _ \
        = get_hedgehog_hyperparams(**overrides)
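    # The MIP agent requires boolean actions, so construction should fail.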
    with pytest.raises(AssertionError):
        _ = PureFeedbackMIPHedgehogAgent(env,
                                         discount_factor,
                                         wk_params,
                                         hh_params,
                                         ac_params,
                                         si_params,
                                         po_params,
                                         demand_planning_params=dp_params)