def test_scheduler_tick_multiple_events():
    """Each tick executes exactly one event, in ascending priority order
    (insertion order breaking ties), shrinking the queue by one per tick."""
    seen = set()
    scheduler = Scheduler()
    scheduler.schedule(TrackedEvent("event one", seen), 1)
    scheduler.schedule(TrackedEvent("event two", seen), 2)
    scheduler.schedule(TrackedEvent("event three", seen), 2)
    scheduler.schedule(TrackedEvent("event four", seen), 3)

    expected_order = ["event one", "event two", "event three", "event four"]
    for done, remaining in enumerate(range(3, -1, -1), start=1):
        scheduler.tick()
        # One fewer pending event after every tick ...
        assert len(scheduler.priority_queue._queue) == remaining
        # ... and exactly the first `done` events have run so far.
        assert seen == set(expected_order[:done])
def test_scheduler_unschedule():
    """Unscheduling an event removes that exact object from the queue,
    leaving the other scheduled events untouched."""
    target = Event()
    scheduler = Scheduler()
    scheduler.schedule(target, 6)
    scheduler.schedule(Event(), 5)
    scheduler.schedule(Event(), 7)
    scheduler.unschedule(target)
    # Identity check: the removed object must not appear in any queue entry.
    assert all(
        scheduled is not target
        for _priority, scheduled in scheduler.priority_queue._queue
    )
import time

from scheduling.Scheduler import *

# Run the scheduler end-to-end, timing the whole run, then print and
# plot the execution history it produced.
t0 = time.time()
scheduler = Scheduler()
scheduler.run()
scheduler.print_history()
scheduler.visualize_history()
t1 = time.time()
print("Elapsed time: %f s" % (t1 - t0))

# Example of replaying a previously saved run with visualize_history_file().
history = read_json_file("scheduling_history.json")
visualize_history_file(history)

# Derive per-group average response times from the saved history file.
group_response_time = get_group_avg_response_time(history)
print(group_response_time)

# Sample output recorded from an earlier run:
# Elapsed time: 381.918144 s
# [10846.0, 0, 0, 0, 0, 0, 0, 0, 0, 0]
type=str, default='../adv_agent/obs_rms.pkl') parser.add_argument('--x_method', type=str, default='grad') parser.add_argument('--surrogate_model', type=str, default="../agent-zoo/agent/YouShallNotPass_agent.pkl") parser.add_argument('--mimic_model_path', type=str, default="../agent-zoo/agent/mimic_model.h5") args = parser.parse_args() adv_agent_path = args.adv_agent_path adv_agent_norm_path = args.adv_agent_norm_path scheduler = Scheduler( annealer_dict={'lr': ConstantAnnealer(learning_rate)}) env_name = env_list[args.env] # define the env_path env_path = args.surrogate_model mimic_model_path = args.mimic_model_path env = gym.make(env_name) venv = SubprocVecEnv([ lambda: make_adv_multi2single_env(env_name, adv_agent_path, adv_agent_norm_path, False) for i in range(n_cpu) ]) venv = Monitor(venv, 0) rew_shape_venv = apply_reward_wrapper(single_env=venv,
def test_scheduler_tick():
    """tick() returns the event it just executed."""
    scheduler = Scheduler()
    scheduler.schedule(TrackedEvent("event one", set()), 1)
    assert scheduler.tick().id == "event one"
def test_scheduler_schedule():
    """Scheduling a single event leaves exactly one entry in the queue."""
    scheduler = Scheduler()
    scheduler.schedule(Event(), 5)
    pending = scheduler.priority_queue._queue
    assert len(pending) == 1
last_checkpoint = step if step - log_interval > last_log: log_callback(logger, locals, globals) last_log = step return True model.learn(total_timesteps=total_timesteps, log_interval=1, callback=callback, seed=seed) if __name__ == "__main__": scheduler = Scheduler(annealer_dict={'lr': ConstantAnnealer(LR)}) # useless env_name = GAME_ENV # multi to single, apply normalization to victim agent's observation, reward, and diff reward. venv = SubprocVecEnv([ lambda: make_zoo_multi2single_env(env_name, VIC_AGT_ID, REW_SHAPE_PARAMS, scheduler, reverse=REVERSE, total_step=TRAINING_ITER) for i in range(N_GAME) ]) # test if REVERSE:
def setUp(self):
    """Create a fresh Scheduler before each test so cases stay independent."""
    self.s = Scheduler()
def setUp(self):
    """Create three PCBs and a fresh Scheduler before each test.

    The PCBs differ only in their first constructor argument (3, 2, 1);
    the remaining two arguments are identical across all three.
    """
    # NOTE(review): PCB's constructor signature is not visible here — the
    # first argument appears to distinguish the processes (priority or
    # PID?); confirm against the PCB definition.
    self.pcb1 = PCB(3, 5, 10)
    self.pcb2 = PCB(2, 5, 10)
    self.pcb3 = PCB(1, 5, 10)
    self.scheduler = Scheduler()