Example #1
"""
This example involves the planning and execution of a more complex workflow;
namely, the original HEFT workflow from Topcuoglu 2000.
"""

import simpy

from topsim.user.schedule.dynamic_plan import DynamicAlgorithmFromPlan
from topsim.user.telescope import Telescope
from topsim.core.simulation import Simulation

EVENT_FILE = 'simulations/real_time/real_time.trace'
CONFIG = 'simulations/real_time/real_time.json'

env = simpy.Environment()

planning_algorithm = 'heft'
scheduling_algorithm = DynamicAlgorithmFromPlan
instrument = Telescope

simulation = Simulation(
    env=env,
    config=CONFIG,
    instrument=instrument,
    algorithm_map={'pheft': 'pheft', 'heft': 'heft', 'fifo': DynamicAlgorithmFromPlan},
    event_file=EVENT_FILE,
)

# Run the simulation up to t=11, then resume the same environment and continue
# to t=300 (presumably the positional equivalents of the runtime= and until=
# keywords used in Example #2 below).
simulation.start(11)
simulation.resume(300)
Example #2
import unittest

import simpy

from topsim.core.simulation import Simulation
from topsim.user.telescope import Telescope
from topsim.user.schedule.dynamic_plan import DynamicAlgorithmFromPlan

# SHADOWPlanning, RunStatus, BASIC_CONFIG and SIM_TIMESTAMP are assumed to be
# provided elsewhere in the test module; their exact import paths are not part
# of this snippet.


class TestBasicIngest(unittest.TestCase):

    def setUp(self) -> None:
        self.env = simpy.Environment()
        self.simulation = Simulation(
            self.env,
            BASIC_CONFIG,
            Telescope,
            planning_algorithm='heft',
            planning_model=SHADOWPlanning('heft'),
            scheduling=DynamicAlgorithmFromPlan,
            delay=None,
            timestamp=SIM_TIMESTAMP
        )

    # def tearDown(self):
    #     output = 'test/basic-workflow-data/output/{0}'
    #     os.remove(f'{output}-sim.pkl')
    #     os.remove(f'{output}-tasks.pkl')

    def testClusterIngest(self):
        """
        The basic ingest exercises the edge cases for timing and scheduling
        within the simulation, as demonstrated in this test.

        A couple of edge cases arise here because we have only 2 resources:
        one of them is taken by ingest, which means we cannot start an
        observation until 1 timestep AFTER an ingest has finished, because
        the telescope checks before that task is successfully removed from
        the cluster.

        This is why we run for 6 seconds and only process 2 observations.

        After we have observed 2 observations, the cold buffer reaches
        capacity, so we are unable to observe any more.
        """
        self.assertEqual(0, self.env.now)
        self.simulation.start(runtime=7)
        self.assertEqual(
            2, self.simulation.cluster._ingest['completed']
        )

        self.assertEqual(
            RunStatus.FINISHED,
            self.simulation.instrument.observations[1].status
        )

    def testBufferIngest(self):
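        # Ingest consumes hot-buffer capacity while an observation is being
        # taken; once ingest completes the data is (presumably) migrated to
        # the cold buffer, restoring the hot buffer's capacity. The expected
        # capacities below come from the basic test configuration.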
        self.assertEqual(0, self.simulation.env.now)
        self.simulation.start(runtime=1)
        self.assertEqual(
            5, self.simulation.buffer.hot[0].current_capacity
        )
        self.simulation.resume(until=2)
        self.assertEqual(
            10, self.simulation.buffer.hot[0].current_capacity
        )
        self.assertEqual(
            5, self.simulation.buffer.cold[0].current_capacity
        )
        self.assertEqual(
            1,
            len(self.simulation.buffer.cold[0].observations["stored"])
        )
        self.simulation.resume(until=4)
        self.assertEqual(10, self.simulation.buffer.hot[0].current_capacity)
        self.assertEqual(0, self.simulation.buffer.cold[0].current_capacity)
        self.assertEqual(
            2,
            len(self.simulation.buffer.cold[0].observations["stored"])
        )

    def testSchedulerRunTime(self):
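        # Advance the simulation in stages and check that observations
        # accumulate in the cold buffer as ingest and workflow processing
        # proceed.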
        self.assertEqual(0, self.simulation.env.now)
        self.simulation.start(runtime=2)
        self.assertEqual(
            1, len(self.simulation.buffer.cold[0].observations['stored'])
        )
        self.simulation.resume(until=8)
        self.simulation.resume(until=11)
        self.simulation.resume(until=12)
        # self.assertEqual(0, len(self.simulation.cluster.tasks['running']))
        # We've finished processing one of the workflows so one observation
        # is finished.
        self.assertEqual(
            2, len(self.simulation.buffer.cold[0].observations['stored'])
        )