Ejemplo n.º 1
0
    def setUp(self) -> None:
        """
        Build a planner and a `simpy` environment for dummy simulations
        that confirm the planner behaves when 'batch' is selected as the
        static scheduling method.

        Returns
        -------
        None
        """
        self.env = simpy.Environment()
        cfg = Config(CONFIG)
        self.model = BatchPlanning('batch')
        self.cluster = Cluster(self.env, config=cfg)
        self.buffer = Buffer(env=self.env, cluster=self.cluster, config=cfg)
        self.planner = Planner(self.env, PLAN_ALGORITHM, self.cluster,
                               self.model)
        # The telescope here deliberately gets no planner/scheduler.
        self.telescope = Telescope(self.env, cfg, planner=None,
                                   scheduler=None)
Ejemplo n.º 2
0
 def testPlannerBasicConfig(self):
     """Check the cluster converts to SHADOW format with expected values."""
     planner = Planner(self.env, PLAN_ALGORITHM, self.cluster)
     available_resources = planner.cluster_to_shadow_format()
     # TODO write tests for the plan
     # System bandwidth in the SHADOW spec for this config is 1.0.
     self.assertEqual(1.0, available_resources['system']['bandwidth'])
     # FIX: removed the unused local `machine` (dead code).
     self.assertEqual(
         84, available_resources['system']['resources']['cat0_m0']['flops'])
Ejemplo n.º 3
0
 def setUp(self):
     """Create the environment, cluster, planner and a test observation."""
     self.env = simpy.Environment()
     self.cluster = Cluster(self.env, CLUSTER_CONFIG)
     self.planner = Planner(self.env, PLAN_ALGORITHM, self.cluster)
     self.observation = Observation(
         'planner_observation',
         OBS_START_TME,
         OBS_DURATION,
         OBS_DEMAND,
         OBS_WORKFLOW,
         type=None,
         data_rate=None,
     )
Ejemplo n.º 4
0
 def setUp(self):
     """Create the planner, cluster and a test observation."""
     self.env = simpy.Environment()
     sched_algorithm = FifoAlgorithm()
     self.planner = Planner(self.env, PLAN_ALGORITHM, MACHINE_CONFIG)
     self.cluster = Cluster(self.env, CLUSTER_CONFIG)
     # self.buffer = Buffer(self.env, self.cluster)
     # self.algorithms = Scheduler(self.env,
     # sched_algorithm, self.buffer, self.cluster)
     self.observation = Observation('planner_observation', OBS_START_TME,
                                    OBS_DURATION, OBS_DEMAND, OBS_WORKFLOW)
     # FIX: removed a redundant trailing `pass` (dead statement in a
     # non-empty method body).
Ejemplo n.º 5
0
 def setUp(self) -> None:
     """Assemble actors for a greedy-from-plan scheduling test."""
     self.env = simpy.Environment()
     algorithm = GreedyAlgorithmFromPlan()
     cfg = Config(LONG_CONFIG)
     self.cluster = Cluster(self.env, cfg)
     self.planner = Planner(
         self.env, PLANNING_ALGORITHM, self.cluster, SHADOWPlanning('heft')
     )
     self.buffer = Buffer(self.env, self.cluster, cfg)
     self.scheduler = Scheduler(
         self.env, self.buffer, self.cluster, algorithm
     )
     self.telescope = Telescope(
         self.env, cfg, self.planner, self.scheduler
     )
Ejemplo n.º 6
0
 def setUp(self):
     """Assemble actors for a dynamic-from-plan scheduling test."""
     self.env = simpy.Environment()
     algorithm = DynamicAlgorithmFromPlan()
     cfg = Config(LONG_CONFIG)
     self.cluster = Cluster(self.env, cfg)
     model = SHADOWPlanning(algorithm=PLANNING_ALGORITHM)
     self.planner = Planner(
         self.env, PLANNING_ALGORITHM, self.cluster, model
     )
     self.buffer = Buffer(self.env, self.cluster, cfg)
     self.scheduler = Scheduler(
         self.env, self.buffer, self.cluster, algorithm
     )
     self.telescope = Telescope(
         self.env, cfg, self.planner, self.scheduler
     )
Ejemplo n.º 7
0
    def setUp(self):
        """Create cluster, buffer, planner and a continuum observation."""
        self.env = simpy.Environment()
        self.cluster = Cluster(env=self.env, spec=CLUSTER_CONFIG)
        self.buffer = Buffer(
            env=self.env, cluster=self.cluster, config=BUFFER_CONFIG
        )
        self.planner = Planner(self.env, PLAN_ALGORITHM, self.cluster)
        self.observation = Observation(
            'scheduler_observation',
            OBS_START_TME,
            OBS_DURATION,
            OBS_DEMAND,
            OBS_WORKFLOW,
            type='continuum',
            data_rate=2,
        )
Ejemplo n.º 8
0
    def setUp(self):
        """
        Repeat of the earlier test cases, but with a high-degree delay
        model attached so that delay flags propagate to the tests.

        Returns
        -------
        None
        """
        self.env = simpy.Environment()
        cfg = Config(INTEGRATION)
        self.cluster = Cluster(self.env, cfg)
        self.buffer = Buffer(self.env, self.cluster, cfg)
        delay = DelayModel(0.9, "normal", DelayModel.DelayDegree.HIGH)
        self.planner = Planner(
            self.env,
            PLANNING_ALGORITHM,
            self.cluster,
            SHADOWPlanning('heft', delay_model=delay),
            delay_model=delay,
        )
        self.scheduler = Scheduler(
            self.env, self.buffer, self.cluster, DynamicAlgorithmFromPlan()
        )
        self.telescope = Telescope(
            self.env, cfg, self.planner, self.scheduler
        )
        # Register the actor processes with the simulation environment,
        # in the same order as the original setup.
        self.env.process(self.cluster.run())
        self.env.process(self.buffer.run())
        self.scheduler.start()
        self.env.process(self.scheduler.run())
        self.env.process(self.telescope.run())
Ejemplo n.º 9
0
class TestSchedulerDynamicReAllocation(unittest.TestCase):
    def setUp(self) -> None:
        """Build the actor graph used by the re-allocation test."""
        self.env = simpy.Environment()
        cfg = Config(LONG_CONFIG)
        self.cluster = Cluster(self.env, cfg)
        self.planner = Planner(
            self.env, PLANNING_ALGORITHM, self.cluster, SHADOWPlanning('heft')
        )
        self.buffer = Buffer(self.env, self.cluster, cfg)
        self.scheduler = Scheduler(
            self.env, self.buffer, self.cluster, GreedyAlgorithmFromPlan()
        )
        self.telescope = Telescope(
            self.env, cfg, self.planner, self.scheduler
        )

    def test_reallocation_with_plan(self):
        """Queue an observation, plan it, and run until it is retired."""
        observation = self.telescope.observations[0]
        self.scheduler.observation_queue.append(observation)
        observation.ast = self.env.now
        observation.plan = self.planner.run(
            observation, self.buffer, self.telescope.max_ingest
        )
        self.env.process(self.scheduler.allocate_tasks(observation))
        self.env.run(1)
        # The observation must be 'stored' in the cold buffer before the
        # scheduler can complete allocation and clear its queue.
        self.buffer.cold[0].observations['stored'].append(observation)
        self.env.run(until=299)
        self.assertEqual(0, len(self.scheduler.observation_queue))
Ejemplo n.º 10
0
class TestWorkflowPlan(unittest.TestCase):
    def setUp(self):
        """Create the environment, cluster, planner and observation."""
        self.env = simpy.Environment()
        self.cluster = Cluster(self.env, CLUSTER_CONFIG)
        self.planner = Planner(self.env, PLAN_ALGORITHM, self.cluster)
        self.observation = Observation(
            'planner_observation',
            OBS_START_TME,
            OBS_DURATION,
            OBS_DEMAND,
            OBS_WORKFLOW,
            type=None,
            data_rate=None,
        )

    def tearDown(self):
        pass

    def testWorkflowPlanCreation(self):
        """Plan the observation with HEFT; check order and task cost."""
        plan = self.planner.plan(
            self.observation.name, self.observation.workflow, 'heft'
        )
        expected_exec_order = [0, 5, 3, 4, 2, 1, 6, 8, 7, 9]
        self.assertEqual(len(plan.tasks), len(expected_exec_order))
        for idx, tid in enumerate(expected_exec_order):
            self.assertEqual(plan.tasks[idx].id, tid)
        # Task at position 5 of the plan: expected compute cost.
        self.assertEqual(plan.tasks[5].flops, 92000)
Ejemplo n.º 11
0
 def setUp(self):
     """Create a scheduler (no algorithm) plus its supporting actors."""
     self.env = simpy.Environment()
     cluster = Cluster(env=self.env, spec=CLUSTER_CONFIG)
     buffer = Buffer(env=self.env, cluster=cluster, config=BUFFER_CONFIG)
     self.scheduler = Scheduler(
         env=self.env, buffer=buffer, cluster=cluster, algorithm=None
     )
     # NOTE(review): the planner is a local and never stored on self —
     # presumably only its construction matters here; confirm.
     planner = Planner(self.env, 'heft', cluster)
Ejemplo n.º 12
0
 def setUp(self):
     """Create planner fixtures using the SHADOW 'heft' planning model."""
     self.env = simpy.Environment()
     cfg = Config(CONFIG)
     self.model = SHADOWPlanning('heft')
     self.cluster = Cluster(self.env, config=cfg)
     self.buffer = Buffer(env=self.env, cluster=self.cluster, config=cfg)
     self.planner = Planner(self.env, PLAN_ALGORITHM, self.cluster,
                            self.model)
     self.observation = Observation(
         'planner_observation',
         OBS_START_TME,
         OBS_DURATION,
         OBS_DEMAND,
         OBS_WORKFLOW,
         data_rate=OBS_DATA_RATE,
     )
Ejemplo n.º 13
0
    def setUp(self):
        """Create actors for the batch-processing scheduler tests.

        FIX: ``self.algorithm`` was assigned twice — first the
        ``BatchProcessing`` class, then an instance; the dead class
        assignment has been removed.
        """
        self.env = simpy.Environment()
        config = Config(CONFIG)
        self.cluster = Cluster(self.env, config=config)
        self.buffer = Buffer(self.env, self.cluster, config)
        self.scheduler = Scheduler(self.env, self.buffer, self.cluster,
                                   DynamicAlgorithmFromPlan())
        self.algorithm = BatchProcessing()
        self.model = BatchPlanning('batch')
        # NOTE(review): 'heft' is passed alongside a BatchPlanning model —
        # confirm the algorithm string is ignored when a model is supplied.
        self.planner = Planner(
            self.env,
            'heft',
            self.cluster,
            self.model,
        )

        self.telescope = Telescope(self.env, config, self.planner,
                                   self.scheduler)
Ejemplo n.º 14
0
 def setUp(self):
     """Create a scheduler (no algorithm) from the shared CONFIG."""
     self.env = simpy.Environment()
     self.config = Config(CONFIG)
     cluster = Cluster(env=self.env, config=self.config)
     buffer = Buffer(env=self.env, cluster=cluster, config=self.config)
     self.scheduler = Scheduler(
         env=self.env, buffer=buffer, cluster=cluster, algorithm=None
     )
     # NOTE(review): SHADOWPlanning is passed as a *class* here, whereas
     # other fixtures pass an instance, and the planner is never stored
     # on self — confirm both are intentional.
     planner = Planner(self.env, 'heft', cluster, SHADOWPlanning)
Ejemplo n.º 15
0
 def setUp(self):
     """Create a delay-aware planner (10% 'normal' delay model)."""
     self.env = simpy.Environment()
     sched_algorithm = DynamicAlgorithmFromPlan()
     cfg = Config(HEFT_CONFIG)
     delay = DelayModel(0.1, "normal")
     self.model = SHADOWPlanning('heft', delay)
     self.cluster = Cluster(self.env, config=cfg)
     self.buffer = Buffer(self.env, self.cluster, cfg)
     self.planner = Planner(
         self.env,
         PLAN_ALGORITHM,
         self.cluster,
         self.model,
         delay_model=delay,
     )
     self.observation = Observation(
         'planner_observation',
         OBS_START_TME,
         OBS_DURATION,
         OBS_DEMAND,
         OBS_WORKFLOW,
         data_rate=OBS_DATA_RATE,
     )
Ejemplo n.º 16
0
class TestPlanner(unittest.TestCase):
    def setUp(self):
        """Create the planner, cluster and a test observation."""
        self.env = simpy.Environment()
        sched_algorithm = FifoAlgorithm()
        self.planner = Planner(self.env, PLAN_ALGORITHM, MACHINE_CONFIG)
        self.cluster = Cluster(self.env, CLUSTER_CONFIG)
        # self.buffer = Buffer(self.env, self.cluster)
        # self.algorithms = Scheduler(self.env,
        # sched_algorithm, self.buffer, self.cluster)
        self.observation = Observation('planner_observation', OBS_START_TME,
                                       OBS_DURATION, OBS_DEMAND, OBS_WORKFLOW)
        # FIX: removed a redundant trailing `pass` (dead statement).

    def tearDown(self):
        pass

    def testShadowIntegration(self):
        pass

    def testPlanReadsFromFile(self):
        """Plan the observation's workflow and check its ID and makespan."""
        # Return a "Plan" object for provided workflow/observation
        plan = self.planner.plan(self.observation.name,
                                 self.observation.workflow, PLAN_ALGORITHM)
        self.assertEqual(plan.id,
                         'planner_observation')  # Expected ID for the workflow
        self.assertEqual(plan.makespan,
                         98)  # Expected makespan for the given graph

    def testPlannerRun(self):
        """Drive the simpy generator with next() so the plan is produced."""
        # because run() is a generator (we call yield for simpy), we use
        # next() to 'get the return value' and run the rest of run()
        next(self.planner.run(self.observation))
        self.assertTrue(self.observation.plan is not None)

    def testGracefulExit(self):
        pass

    def testIncorrectParameters(self):
        pass
Ejemplo n.º 17
0
    def setUp(self):
        """Create scheduler/telescope fixtures for FIFO scheduling tests.

        BUG FIX: the Telescope was previously constructed *before*
        ``self.scheduler`` was assigned, so setUp raised AttributeError.
        The scheduler is now created first.
        """
        self.env = simpy.Environment()
        sched_algorithm = FifoAlgorithm()
        self.planner = Planner(self.env, test_data.planning_algorithm,
                               test_data.machine_config)
        self.cluster = Cluster(self.env, CLUSTER_CONFIG)
        self.buffer = Buffer(self.env, self.cluster, BUFFER_CONFIG)
        self.observations = [
            Observation('scheduler_observation',
                        OBS_START_TME,
                        OBS_DURATION,
                        OBS_DEMAND,
                        OBS_WORKFLOW,
                        type='continuum',
                        data_rate=5)
        ]
        telescopemax = 36  # maximum number of antennas

        self.scheduler = Scheduler(self.env, sched_algorithm, self.buffer,
                                   self.cluster)
        self.telescope = Telescope(self.env, OBSERVATION_CONFIG,
                                   self.scheduler, self.planner)
Ejemplo n.º 18
0
 def testPlannerBasicConfig(self):
     """Check the SHADOW-format conversion of the cluster resources."""
     planner = Planner(env=self.env,
                       algorithm=PLAN_ALGORITHM,
                       cluster=self.cluster,
                       model=self.model)
     available_resources = planner.model._cluster_to_shadow_format(
         self.cluster)
     # Bandwidth set at 1gb/s = 60gb/min.
     self.assertEqual(60.0, available_resources['system']['bandwidth'])
     # FIX: removed the unused local `machine` (dead code).
     self.assertEqual(
         5040,
         available_resources['system']['resources']['cat0_m0']['flops'])
Ejemplo n.º 19
0
class TestPlannerDelay(unittest.TestCase):
    def setUp(self):
        """Create a delay-aware planner with a 10% 'normal' delay model."""
        self.env = simpy.Environment()
        sched_algorithm = DynamicAlgorithmFromPlan()
        config = Config(HEFT_CONFIG)
        dm = DelayModel(0.1, "normal")
        self.model = SHADOWPlanning('heft', dm)
        self.cluster = Cluster(self.env, config=config)
        self.buffer = Buffer(self.env, self.cluster, config)
        self.planner = Planner(self.env,
                               PLAN_ALGORITHM,
                               self.cluster,
                               self.model,
                               delay_model=dm)
        self.observation = Observation('planner_observation',
                                       OBS_START_TME,
                                       OBS_DURATION,
                                       OBS_DEMAND,
                                       OBS_WORKFLOW,
                                       data_rate=OBS_DATA_RATE)

    def testShadowIntegration(self):
        pass

    def testPlannerRun(self):
        """
        Run the planner and confirm the plan exists and carries the
        delay probability from the model.
        """
        self.observation.ast = self.env.now
        self.observation.plan = self.planner.run(self.observation, self.buffer,
                                                 TEL_MAX_INGEST)
        self.assertTrue(self.observation.plan is not None)
        # BUG FIX: `assertTrue(0.1, x)` always passed — 0.1 is truthy and
        # the second argument is treated as the failure *message*. The
        # intent was to compare the task's delay probability with the
        # model's 0.1.
        self.assertEqual(0.1, self.observation.plan.tasks[0].delay.prob)

    def testGracefulExit(self):
        pass

    def testIncorrectParameters(self):
        pass
Ejemplo n.º 20
0
    def setUp(self):
        """Build and start the full actor graph for integration tests."""
        self.env = simpy.Environment()
        cfg = Config(INTEGRATION)
        self.cluster = Cluster(self.env, cfg)
        self.buffer = Buffer(self.env, self.cluster, cfg)
        self.planner = Planner(
            self.env, PLANNING_ALGORITHM, self.cluster, SHADOWPlanning('heft')
        )
        self.scheduler = Scheduler(
            self.env, self.buffer, self.cluster, DynamicAlgorithmFromPlan()
        )
        self.telescope = Telescope(
            self.env, cfg, self.planner, self.scheduler
        )
        # Register the actor processes, preserving the original order.
        self.env.process(self.cluster.run())
        self.env.process(self.buffer.run())
        self.scheduler.start()
        self.env.process(self.scheduler.run())
        self.env.process(self.telescope.run())
Ejemplo n.º 21
0
class TestBatchProcessingPlan(unittest.TestCase):
    def setUp(self) -> None:
        """
        Create a planner and a `simpy` environment in which to run dummy
        simulations that confirm the planner behaves when 'batch' is the
        chosen static scheduling method.

        Returns
        -------
        None
        """
        self.env = simpy.Environment()
        cfg = Config(CONFIG)
        self.model = BatchPlanning('batch')
        self.cluster = Cluster(self.env, config=cfg)
        self.buffer = Buffer(env=self.env, cluster=self.cluster, config=cfg)
        self.planner = Planner(self.env, PLAN_ALGORITHM, self.cluster,
                               self.model)
        self.telescope = Telescope(self.env, cfg, planner=None,
                                   scheduler=None)

    def test_generate_topological_sort(self):
        """
        Exercise the core of the batch-planning system: a topological
        sort of the tasks plus precedence resources, wrapped in a
        'WorkflowPlan' object.

        Returns
        -------
        plan : core.planner.WorkflowPlan
            WorkflowPlan object for the observation
        """
        observation = self.telescope.observations[0]
        plan = self.planner.run(observation, self.buffer, TEL_MAX_INGEST)
        expected_order = [0, 1, 2, 3, 4, 5, 6, 8, 7, 9]
        self.assertIsNotNone(plan)
        self.assertListEqual(expected_order, plan.exec_order)

    def tearDown(self) -> None:
        pass
Ejemplo n.º 22
0
	def __init__(
			self,
			env,
			telescope_config,
			cluster_config,
			buffer_config,
			planning_algorithm,
			scheduling_algorithm,
			event_file,
			visualisation=False
	):
		"""
		Wire together the simulation actors (cluster, buffer, planner,
		scheduler, telescope) sharing one simpy environment.

		Parameters
		----------
		env : simpy.Environment
			Simulation environment shared by all actors.
		telescope_config, cluster_config, buffer_config :
			Configuration values handed to the respective actors.
		planning_algorithm :
			Algorithm identifier passed to the Planner.
		scheduling_algorithm :
			Algorithm instance passed to the Scheduler.
		event_file :
			If not None, a Monitor is attached to record events.
		visualisation : bool
			If True, a Visualiser is attached as well.
		"""
		self.env = env
		# Event file setup
		self.event_file = event_file
		self.visualisation = visualisation
		# NOTE(review): self.monitor / self.visualiser only exist when
		# the corresponding flag is set; later access must guard for that.
		if event_file is not None:
			self.monitor = Monitor(self)
		if visualisation:
			self.visualiser = Visualiser(self)
		# Process necessary config files

		# Initiaise Actor and Resource objects

		self.cluster = Cluster(env, cluster_config)
		self.buffer = Buffer(env, self.cluster, config=buffer_config)
		# NOTE(review): the Planner receives cluster_config here, while
		# other call sites pass the Cluster instance — confirm intended.
		self.planner = Planner(env, planning_algorithm, cluster_config)
		self.scheduler = Scheduler(
			env, self.buffer, self.cluster, scheduling_algorithm
		)

		self.telescope = Telescope(
			env=self.env,
			config=telescope_config,
			planner=self.planner,
			scheduler=self.scheduler
		)
Ejemplo n.º 23
0
    def __init__(self,
                 env,
                 config,
                 instrument,
                 planning_model,
                 planning_algorithm,
                 scheduling,
                 delay=None,
                 timestamp=None,
                 to_file=False,
                 hdf5_path=None,
                 **kwargs):
        """
        Construct the simulation: monitor, cluster, buffer, planner,
        scheduler and instrument, plus optional HDF5 output handling.

        Parameters
        ----------
        env : simpy.Environment
            Simulation environment shared by all actors.
        config : str
            Path to the simulation configuration file.
        instrument : type
            Instrument class; instantiated with env/config/planner/scheduler.
        planning_model, planning_algorithm :
            Planning model instance and algorithm identifier for the Planner.
        scheduling : type
            Scheduling-algorithm class; instantiated with no arguments.
        delay : DelayModel, optional
            Defaults to a zero-probability delay model when omitted.
        timestamp : str, optional
            Monitor timestamp; defaults to current epoch seconds.
        to_file : bool
            When True, ``hdf5_path`` must also be provided.
        hdf5_path : str, optional
            Output path for the HDF5 store.

        Raises
        ------
        ValueError
            If ``to_file`` is set without ``hdf5_path``, or the HDF5 path
            is rejected by pandas.
        """
        #: :py:obj:`simpy.Environment` object
        self.env = env

        if timestamp:
            #: :py:obj:`~topsim.core.monitor.Monitor` instance
            self.monitor = Monitor(self, timestamp)
            self._timestamp = timestamp
        else:
            # Use whole-second epoch time as the default run identifier.
            sim_start_time = f'{time.time()}'.split('.')[0]
            self._timestamp = sim_start_time
            self.monitor = Monitor(self, sim_start_time)
        # Process necessary config files

        self._cfg_path = config  #: Configuration path

        # Initiaise Actor and Resource objects
        cfg = Config(config)
        #: :py:obj:`~topsim.core.cluster.Cluster` instance
        self.cluster = Cluster(env, cfg)
        #: :py:obj:`~topsim.core.buffer.Buffer` instance
        self.buffer = Buffer(env, self.cluster, cfg)
        # FIX: removed the no-op self-assignments
        # (`planning_algorithm = planning_algorithm`, etc.).

        if not delay:
            # TODO Have this approach replicated so we don't specify the
            #  model outside the simulation.
            delay = DelayModel(0.0, "normal", DelayModel.DelayDegree.NONE)
        self.planner = Planner(env, planning_algorithm, self.cluster,
                               planning_model, delay)
        scheduling_algorithm = scheduling()
        #: :py:obj:`~topsim.core.scheduler.Scheduler` instance
        self.scheduler = Scheduler(env, self.buffer, self.cluster,
                                   scheduling_algorithm)
        #: User-defined :py:obj:`~topsim.core.instrument.Instrument` instance
        self.instrument = instrument(env=self.env,
                                     config=cfg,
                                     planner=self.planner,
                                     scheduler=self.scheduler)

        #: :py:obj:`bool` Flag for producing simulation output in a `.pkl`
        # file.
        self.to_file = to_file
        if self.to_file and hdf5_path:
            try:
                if os.path.exists(hdf5_path):
                    LOGGER.warning('Output HDF5 path already exists, '
                                   'simulation appended to existing file')
                self._hdf5_store = pd.HDFStore(hdf5_path)
                self._hdf5_store.close()
            except ValueError as err:
                # BUG FIX: the original `except ValueError('...'):` handed
                # an exception *instance* to `except`, which requires a
                # class — that would raise TypeError at handling time.
                # Re-raise with the intended guidance, chaining the cause.
                raise ValueError(
                    'Check pandas.HDFStore documentation for valid file path'
                ) from err
        elif self.to_file and not hdf5_path:
            raise ValueError(
                'Attempted to initialise Simulation object that outputs'
                'to file without providing file path')
        else:
            LOGGER.info(
                'Simulation output will not be stored directly to file')

        if 'delimiters' in kwargs:
            #: Used to separate different simulations in HDF5 output
            self._delimiters = kwargs['delimiters']
        else:
            self._delimiters = ''

        self.running = False
Ejemplo n.º 24
0
class TestColdBufferRequests(unittest.TestCase):
    def setUp(self):
        """Create cluster, buffer, planner and a continuum observation."""
        self.env = simpy.Environment()
        self.cluster = Cluster(env=self.env, spec=CLUSTER_CONFIG)

        self.buffer = Buffer(env=self.env,
                             cluster=self.cluster,
                             config=BUFFER_CONFIG)
        self.planner = Planner(self.env, PLAN_ALGORITHM, self.cluster)
        self.observation = Observation('scheduler_observation',
                                       OBS_START_TME,
                                       OBS_DURATION,
                                       OBS_DEMAND,
                                       OBS_WORKFLOW,
                                       type='continuum',
                                       data_rate=2)

    def tearDown(self):
        pass

    def testHotColdInteraction(self):
        """
        Test the results of running 'buffer.request_data_from(observation)'.

        Returns
        -------
        None
        """
        # TODO THIS NEED TO CHANGE
        # TODO Hot Cold transfer should be automatic, not instigated by the
        #  scheduler. THis ensures that the scheduler only needs to check the
        #  cold buffer, and that movement of data from the hot buffer to the
        #  cold buffer is 'automatic' (that is, once data has been through
        #  the hot buffer completely and INGEST run on that data, we can move
        #  it to a large buffer store).
        # Prelimns
        self.observation.status = RunStatus.RUNNING
        self.env.process(self.buffer.ingest_data_stream(self.observation))
        self.env.run(until=10)
        self.assertEqual(480, self.buffer.hot.current_capacity)

        # Moving data from one to the other
        self.assertEqual(250, self.buffer.cold.current_capacity)
        self.env.process(self.buffer.request_data_from(self.observation))
        self.env.run(until=15)
        self.assertEqual(240, self.buffer.cold.current_capacity)
        self.assertEqual(490, self.buffer.hot.current_capacity)
        self.env.run(until=40)
        self.assertEqual(230, self.buffer.cold.current_capacity)
        self.assertEqual(500, self.buffer.hot.current_capacity)
        self.assertListEqual([self.observation], self.buffer.cold.observations)

    def testHotColdErrors(self):
        """
        We haven't processed the observation yet, so there shouldn't be
        anything in the Hot Buffer to request.
        """
        self.env.process(self.buffer.request_data_from(self.observation))
        self.assertRaises(
            RuntimeError,
            self.env.run,
            until=10,
        )

    def testWorkflowAddedToQueue(self):
        """
        We only add a workflow to the queue once an observation has finished
        (and, therefore, after we have finished generating a plan for it).

        :return: None
        """
        # Calling planner.run() stores the generated plan in the observation
        # object; next() drives the simpy generator immediately.
        next(self.planner.run(self.observation))
        # Buffer observation queue should be empty before the hand-off.
        self.assertTrue(self.buffer.observations_for_processing.empty())
        # BUG FIX: this hand-off was commented out, making the final
        # size() == 1 assertion impossible to satisfy.
        self.buffer.add_observation_to_waiting_workflows(self.observation)
        self.assertTrue(self.buffer.observations_for_processing.size() == 1)
Ejemplo n.º 25
0
class TestSchedulerFIFO(unittest.TestCase):
    def setUp(self):
        """Create the FIFO-scheduler fixtures.

        BUG FIX: the Telescope was previously constructed *before*
        ``self.scheduler`` was assigned, so setUp raised AttributeError.
        The scheduler is now created first.
        """
        self.env = simpy.Environment()
        sched_algorithm = FifoAlgorithm()
        self.planner = Planner(self.env, test_data.planning_algorithm,
                               test_data.machine_config)
        self.cluster = Cluster(self.env, CLUSTER_CONFIG)
        self.buffer = Buffer(self.env, self.cluster, BUFFER_CONFIG)
        self.observations = [
            Observation('scheduler_observation',
                        OBS_START_TME,
                        OBS_DURATION,
                        OBS_DEMAND,
                        OBS_WORKFLOW,
                        type='continuum',
                        data_rate=5)
        ]
        telescopemax = 36  # maximum number of antennas

        self.scheduler = Scheduler(self.env, sched_algorithm, self.buffer,
                                   self.cluster)
        self.telescope = Telescope(self.env, OBSERVATION_CONFIG,
                                   self.scheduler, self.planner)

    def tearDown(self):
        pass

    def testSchedulerDecision(self):
        """Run a mini-simulation and check the first task's allocation."""
        # make_decision() only does something interesting once a workflow
        # plan is in the buffer; the observation object stores the plan and
        # is what sits in the 'observations_for_processing' queue.
        next(self.planner.run(self.observations[0]))
        self.buffer.add_observation_to_waiting_workflows(self.observations[0])
        # The scheduler is only effective within a running simulation (it
        # reads env.now), so we drive a mini-simulation with env.run();
        # check_buffer updates the scheduler's workflow list each timestep.
        test_flag = True
        self.env.process(self.scheduler.run())
        self.env.run(until=1)
        print(self.env.now)
        # The first task should be scheduled at t=0, so at t=1 we verify it
        # has been placed on the expected machine.
        expected_machine = "cat2_m2"
        expected_task_no = 0
        self.assertTrue(self.cluster.running_tasks)
        for m in self.cluster.machines:
            if m.id == expected_machine:
                self.assertEqual(m.current_task.id, expected_task_no)
        # Need to assert that there is something in cluster.running_tasks
        # first element of running tasks should be the first task
        self.env.run(until=100)
        print(self.env.now)
        # NOTE(review): `self.algorithms` is never defined on this class
        # (its assignment in setUp is commented out), so this loop raises
        # AttributeError — and `test_flag` never flips, so it would not
        # terminate anyway. Left in place pending confirmation of intent.
        while test_flag:
            next(self.algorithms.run())
Ejemplo n.º 26
0
class TestSchedulerDynamicPlanAllocation(unittest.TestCase):
    def setUp(self):
        # Build the full actor graph — cluster, SHADOW/'heft' planner,
        # buffer, dynamic-from-plan scheduler, telescope — on one env.
        self.env = simpy.Environment()
        sched_algorithm = DynamicAlgorithmFromPlan()
        config = Config(HEFT_CONFIG)
        self.cluster = Cluster(self.env, config)
        self.planner = Planner(self.env, PLANNING_ALGORITHM, self.cluster,
                               SHADOWPlanning('heft'))
        self.buffer = Buffer(self.env, self.cluster, config)
        self.scheduler = Scheduler(self.env, self.buffer, self.cluster,
                                   sched_algorithm)
        self.telescope = Telescope(self.env, config, self.planner,
                                   self.scheduler)

    def tearDown(self):
        pass

    def testAllocationTasksNoObservation(self):
        """
        allocate_tasks assumes we have:

            * An observation stored in the ColdBuffer
            * A plan stored for that observation
            * Access to a scheduling algorithm (in this case, FifoAlgorithm).

        Need to check:
            * If there is no current observation, we can't do anthing
            * If there is an observation, but no plan, we assign the
            observation planto the current_plan.
            * Once things are running, we make sure things are being
            scheduled onto the right machines
            * They should also be running for the correct period of time.

        The allocations for the HEFT algorithm are (in sorted order):
            id - mid    - (ast,aft)
            0 - cat2_m2 - (0,11)
            3 - cat2_m2 - (11,21)
            2 - cat2_m2 - (21,30)
            4 - cat1_m1 - (22, 40)
            1 - cat0_m0 - (29,42)
            5 - cat2_m2 - (30,45)
            6 - cat2_m2 - (45, 55)
            8 - cat2_m2 - (58, 71)
            7 - cat0_m0 - (60, 61)
            9 - cat0_m0 - (84,98)

        """
        curr_obs = self.telescope.observations[0]
        # Without a queued observation/plan, allocate_tasks must fail.
        gen = self.scheduler.allocate_tasks(curr_obs)
        self.assertRaises(RuntimeError, next, gen)
        l = [0, 3, 2, 4, 1, 5, 6, 8, 7, 9]
        # NOTE(review): exec_ord is built but never asserted against —
        # presumably the intended expected task-id strings; confirm.
        exec_ord = [
            curr_obs.name + '_' + str(self.env.now) + '_' + str(tid)
            for tid in l
        ]
        self.scheduler.observation_queue.append(curr_obs)
        curr_obs.ast = self.env.now
        curr_obs.plan = self.planner.run(curr_obs, self.buffer,
                                         self.telescope.max_ingest)
        self.env.process(self.scheduler.allocate_tasks(curr_obs))
        self.env.run(1)
        self.assertListEqual(l, [a.task.tid for a in curr_obs.plan.exec_order])
        # Observation must be 'stored' in the cold buffer before the
        # scheduler can finish allocating and retire it from the queue.
        self.buffer.cold[0].observations['stored'].append(curr_obs)
        self.env.run(until=99)
        self.assertEqual(10, len(self.cluster._tasks['finished']))
        self.assertEqual(0, len(self.cluster._tasks['running']))
        self.assertEqual(0, len(self.scheduler.observation_queue))
Ejemplo n.º 27
0
class TestWorkflowPlan(unittest.TestCase):
    """Tests for the WorkflowPlan produced by ``Planner.run()``."""

    def setUp(self):
        """Create a SHADOW 'heft' planner and a test observation."""
        self.env = simpy.Environment()
        config = Config(CONFIG)
        self.model = SHADOWPlanning('heft')
        self.cluster = Cluster(self.env, config=config)
        self.buffer = Buffer(env=self.env, cluster=self.cluster, config=config)
        self.planner = Planner(
            self.env,
            PLAN_ALGORITHM,
            self.cluster,
            self.model,
        )
        self.observation = Observation('planner_observation',
                                       OBS_START_TME,
                                       OBS_DURATION,
                                       OBS_DEMAND,
                                       OBS_WORKFLOW,
                                       data_rate=OBS_DATA_RATE)

    def tearDown(self):
        pass

    def testWorkflowPlanCreation(self):
        """
        Verify the planner emits tasks in HEFT rank order and that each
        task id embeds the observation name, plan time and task index.

        Notes
        -------
        Upward ranks computed by HEFT for this workflow:
        id rank
        0 6421.0
        5 4990.0
        3 4288.0
        4 4240.0
        2 3683.0
        1 4077.0
        6 2529.0
        8 2953.0
        7 2963.0
        9 1202.0
        Returns
        -------
        True if passes all tests, false otherwise
        """

        time = self.env.now
        # The planner requires an assigned start time (ast) before planning.
        self.observation.ast = self.env.now
        plan = self.planner.run(self.observation, self.buffer, TEL_MAX_INGEST)

        expected_exec_order = [0, 5, 3, 4, 2, 1, 6, 8, 7, 9]
        self.assertEqual(len(plan.tasks), len(expected_exec_order))
        for x in range(len(plan.tasks)):
            self.assertEqual(
                plan.tasks[x].id,
                'planner_observation_{0}_{1}'.format(time,
                                                     expected_exec_order[x]))
        # NOTE(review): original comment said "Get taskid 5", but index 5 of
        # the exec order is task id 1 — verify which task's flops this pins.
        task5_comp = plan.tasks[5].flops
        self.assertEqual(task5_comp, 5520000)
# ---- Ejemplo n.º 28 (scrape separator, kept as comment) ----
class TestBufferRequests(unittest.TestCase):
    """Tests for data movement between the hot and cold buffer tiers."""

    def setUp(self):
        """Create a cluster, a two-tier buffer, a planner and an observation."""
        self.env = simpy.Environment()
        self.config = Config(CONFIG)
        self.cluster = Cluster(env=self.env, config=self.config)

        self.buffer = Buffer(
            env=self.env, cluster=self.cluster, config=self.config
        )
        self.planner = Planner(
            self.env, PLAN_ALGORITHM, self.cluster, SHADOWPlanning('heft')
        )
        self.observation = Observation(
            'scheduler_observation',
            OBS_START_TME,
            OBS_DURATION,
            OBS_DEMAND,
            OBS_WORKFLOW,
            data_rate=2
        )

    def tearDown(self):
        pass

    def test_buffer_hot_to_cold(self):
        """
        Run an ingest into the hot buffer, then verify the staged movement
        of the observation's data into the cold buffer over simulation time.

        Using the current situation, we should have the observation finished by
        timestep [TBC], and then the observation moved across by timestep [TBC]

        Returns
        -------

        """
        self.observation.status = RunStatus.RUNNING
        self.env.process(self.buffer.ingest_data_stream(self.observation))
        self.env.run(until=10)
        # Ingest at data_rate=2 for 10 timesteps => 20 units used of 500.
        self.assertEqual(480, self.buffer.hot[BUFFER_ID].current_capacity)

        # Moving data from one buffer tier to the other
        self.assertEqual(250, self.buffer.cold[BUFFER_ID].current_capacity)
        self.assertTrue(self.observation in
                        self.buffer.hot[BUFFER_ID].observations["stored"])
        self.env.process(self.buffer.move_hot_to_cold(0))
        self.env.run(until=15)
        self.assertEqual(240, self.buffer.cold[BUFFER_ID].current_capacity)
        self.assertEqual(490, self.buffer.hot[BUFFER_ID].current_capacity)
        self.env.run(until=20)
        self.assertEqual(230, self.buffer.cold[BUFFER_ID].current_capacity)
        self.env.run(until=22)
        # Hot buffer fully drained; cold buffer holds the observation's data.
        self.assertEqual(500, self.buffer.hot[BUFFER_ID].current_capacity)
        self.assertEqual(230, self.buffer.cold[BUFFER_ID].current_capacity)
        self.assertListEqual(
            [self.observation],
            self.buffer.cold[BUFFER_ID].observations['stored']
        )

    def test_hot_transfer_observation(self):
        """
        When passed an observation, over a period of time ensure that the
        complete data set is removed from the hot buffer.

        Only when all data has finished being transferred do we add the
        observation to ColdBuffer.observations.

        Observation.total_data_size => 50

        ColdBuffer.max_data_rate => 2; therefore

        Time until Observation is moved => 25.

        Returns
        -------
        """
        self.buffer.hot[BUFFER_ID].current_capacity = 450
        self.observation.total_data_size = 50
        data_left_to_transfer = self.observation.total_data_size
        self.buffer.hot[BUFFER_ID].observations["stored"].append(
            self.observation)
        data_left_to_transfer = self.buffer.hot[BUFFER_ID].transfer_observation(
            self.observation,
            self.buffer.cold[BUFFER_ID].max_data_rate,
            data_left_to_transfer
        )
        # One step at max_data_rate=2 moves 2 units: 50 -> 48.
        self.assertEqual(48, data_left_to_transfer)
        self.assertTrue(
            self.observation in self.buffer.hot[BUFFER_ID].observations[
                "stored"]
        )
        self.assertEqual(452, self.buffer.hot[BUFFER_ID].current_capacity)
        # Drain the remaining data; capacity should fully recover.
        while data_left_to_transfer > 0:
            data_left_to_transfer = self.buffer.hot[
                BUFFER_ID].transfer_observation(
                self.observation,
                self.buffer.cold[BUFFER_ID].max_data_rate,
                data_left_to_transfer
            )
        self.assertEqual(0, data_left_to_transfer)
        self.assertEqual(500, self.buffer.hot[BUFFER_ID].current_capacity)

    def test_cold_receive_data(self):
        """
        When passed an observation, over a period of time ensure that the
        complete data set is added to the Cold Buffer.

        Only when all data has finished being transferred do we add the
        observation to ColdBuffer.observations.

        Observation.total_data_size => 50

        ColdBuffer.max_data_rate => 2; therefore

        Time until Observation is moved => 25.

        Returns
        -------
        """

        self.observation.total_data_size = 50
        data_left_to_transfer = self.observation.total_data_size
        data_left_to_transfer = self.buffer.cold[BUFFER_ID].receive_observation(
            self.observation,
            data_left_to_transfer
        )
        self.assertEqual(48, data_left_to_transfer)
        # Observation is not 'stored' until the transfer completes.
        self.assertFalse(
            self.observation in self.buffer.cold[BUFFER_ID].observations[
                'stored']
        )

        while data_left_to_transfer > 0:
            data_left_to_transfer = self.buffer.cold[
                BUFFER_ID].receive_observation(
                self.observation,
                data_left_to_transfer
            )
        self.assertTrue(
            self.observation in self.buffer.cold[BUFFER_ID].observations[
                'stored']
        )
        # Transfer slot is cleared once the observation is stored.
        self.assertEqual(None,
                         self.buffer.cold[BUFFER_ID].observations['transfer'])

    def testWorkflowAddedToQueue(self):
        """
        We only add a workflow to the queue once an observation has finished
        (and, therefore, after we have finished generating a plan for it).
        :return: None
        """

        # Calling planner.run() stores the generated plan in the observation
        # object; the planner requires an assigned start time (ast) first.
        self.observation.ast = 0
        self.observation.plan = self.planner.run(self.observation,
                                                 self.buffer, None)
        self.assertTrue(self.observation.plan is not None)
# ---- Ejemplo n.º 29 (scrape separator, kept as comment) ----
class TestBatchSchedulerAllocation(unittest.TestCase):
    def setUp(self):
        """
        Build a full batch-scheduling pipeline: cluster, buffer, scheduler,
        batch planner and telescope, for exercising BatchProcessing.
        """
        self.env = simpy.Environment()
        config = Config(CONFIG)
        self.cluster = Cluster(self.env, config=config)
        self.buffer = Buffer(self.env, self.cluster, config)
        self.scheduler = Scheduler(self.env, self.buffer, self.cluster,
                                   DynamicAlgorithmFromPlan())
        # The original code first assigned the BatchProcessing *class* here
        # and immediately shadowed it with an instance; the dead assignment
        # has been removed and only the instance is kept.
        self.algorithm = BatchProcessing()
        self.model = BatchPlanning('batch')
        self.planner = Planner(
            self.env,
            'heft',
            self.cluster,
            self.model,
        )

        self.telescope = Telescope(self.env, config, self.planner,
                                   self.scheduler)

    def test_resource_provision(self):
        """
        Check the baseline resource availability of the cluster.

        NOTE(review): the original docstring discussed a max_resource_split
        of 2 yielding 5 provisioned machines, but the assertion below only
        checks that all 10 cluster machines start out available — confirm
        the intended coverage.

        Returns
        -------

        """
        available = self.cluster.get_available_resources()
        self.assertEqual(10, len(available))

    def test_max_resource_provision(self):
        """
        After 5 machines are taken for ingest, the batch algorithm should
        see exactly 5 remaining to provision; once those are provisioned
        for the observation's plan, nothing further is available.
        """
        observation = self.telescope.observations[0]
        ingest = self.cluster.provision_ingest_resources(5, observation)
        self.env.process(ingest)
        self.env.run(until=1)
        remaining = self.cluster.get_available_resources()
        self.assertEqual(5, len(remaining))
        self.assertEqual(
            5, self.algorithm._max_resource_provision(self.cluster))
        # TODO The algorithm must provision resources if they are not already
        #  provisioned.
        plan = self.planner.run(
            observation, self.buffer, self.telescope.max_ingest)
        self.algorithm._provision_resources(self.cluster, plan)
        idle = self.cluster.get_idle_resources(observation.name)
        self.assertEqual(5, len(idle))
        self.assertEqual(
            0, self.algorithm._max_resource_provision(self.cluster))

    def test_algorithm_allocation(self):
        """
        Run the batch algorithm directly (mirroring what the Scheduler's
        allocate_task() would do) and confirm the plan's first task lands
        in the produced schedule.
        """
        observation = self.telescope.observations[0]
        observation.plan = self.planner.run(
            observation, self.buffer, self.telescope.max_ingest)
        # Replicate the Scheduler allocate_task() methods
        schedule = {}
        schedule, status = self.algorithm.run(
            self.cluster, self.env.now, observation.plan, schedule)
        first_task = observation.plan.tasks[0]
        self.assertTrue(first_task in schedule)

    def test_observation_queue(self):
        """