Example #1
 def test_json(self):
     a = {"d": [1, 2, "v"], "p": 1}
     b = {"a": "b"}
     rs = ResultSet()
     rs.add(a, b)
     rs.add(b, a)
     assert [[a, b], [b, a]] == eval(rs.json())
Example #2
 def test_json(self):
     a = {"d": [1, 2, "v"], "p": 1}
     b = {"a": "b"}
     rs = ResultSet()
     rs.add(a, b)
     rs.add(b, a)
     self.assertEqual([[a, b], [b, a]], eval(rs.json()))
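Both tests above exercise the same two-method interface: add() records a (params, results) pair and json() serializes the recorded pairs. A minimal sketch satisfying that interface is shown below; it is an assumption for illustration, not the project's actual ResultSet, which may store additional metadata.

import json


class ResultSet(object):
    """Minimal sketch of the interface the tests above rely on (an
    assumption for illustration, not the project's real ResultSet)."""

    def __init__(self):
        self._results = []

    def add(self, parameters, results):
        # Record each (parameters, results) pair in insertion order
        self._results.append([parameters, results])

    def json(self):
        # Serialize the recorded pairs; eval() on the output works in the
        # tests only because every value is a plain dict/list/str/int
        return json.dumps(self._results)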
Example #3
class Orchestrator(object):
    """Orchestrator.

    It is responsible for orchestrating the execution of all experiments and
    aggregating results.
    """
    def __init__(self, settings, summary_freq=4):
        """Constructor

        Parameters
        ----------
        settings : Settings
            The settings of the simulator
        summary_freq : int
            Frequency (in number of experiments) at which summary messages
            are displayed
        """
        self.settings = settings
        self.results = ResultSet()
        self.seq = SequenceNumber()
        self.exp_durations = collections.deque(maxlen=30)
        self.n_success = 0
        self.n_fail = 0
        self.summary_freq = summary_freq
        self._stop = False
        if self.settings.PARALLEL_EXECUTION:
            self.pool = mp.Pool(settings.N_PROCESSES)

    def stop(self):
        """Stop the execution of the orchestrator
        """
        logger.info('Orchestrator is stopping')
        self._stop = True
        if self.settings.PARALLEL_EXECUTION:
            self.pool.terminate()
            self.pool.join()

    def run(self):
        """Run the orchestrator.

        This call is blocking, whether multiple processes are used or not. This
        method returns only after all experiments are executed.
        """
        # Create queue of experiment configurations
        queue = collections.deque(self.settings.EXPERIMENT_QUEUE)
        # Calculate number of experiments and number of processes
        self.n_exp = len(queue) * self.settings.N_REPLICATIONS
        self.n_proc = self.settings.N_PROCESSES \
                      if self.settings.PARALLEL_EXECUTION \
                      else 1
        logger.info('Starting simulations: %d experiments, %d process(es)' %
                    (self.n_exp, self.n_proc))

        if self.settings.PARALLEL_EXECUTION:
            # This job queue is used only to keep track of which jobs have
            # finished and which are still running. Currently this information
            # is used only to handle keyboard interrupts correctly
            job_queue = collections.deque()
            # Schedule experiments from the queue
            while queue:
                experiment = queue.popleft()
                for _ in range(self.settings.N_REPLICATIONS):
                    job_queue.append(
                        self.pool.apply_async(
                            run_scenario,
                            args=(self.settings, experiment, self.seq.assign(),
                                  self.n_exp),
                            callback=self.experiment_callback))
            self.pool.close()
            # This solution is probably not optimal, but at least makes
            # KeyboardInterrupt work fine, which is crucial if launching the
            # simulation remotely via screen.
            # What happens here is that we keep waiting for possible
            # KeyboardInterrupts till the last process terminates successfully.
            # We may have to wait up to 5 seconds after the last process
            # terminates before exiting, which is really negligible
            try:
                while job_queue:
                    job = job_queue.popleft()
                    while not job.ready():
                        time.sleep(5)
            except KeyboardInterrupt:
                self.pool.terminate()
            self.pool.join()

        else:  # Single-process execution
            while queue:
                experiment = queue.popleft()
                for _ in range(self.settings.N_REPLICATIONS):
                    self.experiment_callback(
                        run_scenario(self.settings, experiment,
                                     self.seq.assign(), self.n_exp))
                    if self._stop:
                        self.stop()

        logger.info(
            'END | Planned: %d, Completed: %d, Succeeded: %d, Failed: %d',
            self.n_exp, self.n_fail + self.n_success, self.n_success,
            self.n_fail)

    def experiment_callback(self, args):
        """Callback method called by run_scenario

        Parameters
        ----------
        args : tuple
            Tuple of arguments
        """
        # If args is None, that means that an exception was raised during the
        # execution of the experiment. In such case, ignore it
        if not args:
            self.n_fail += 1
            return
        # Extract parameters
        params, results, duration = args
        self.n_success += 1
        # Store results
        self.results.add(params, results)
        self.exp_durations.append(duration)
        if self.n_success % self.summary_freq == 0:
            # Number of experiments scheduled to be executed
            n_scheduled = self.n_exp - (self.n_fail + self.n_success)
            # Compute ETA
            n_cores = min(mp.cpu_count(), self.n_proc)
            mean_duration = sum(self.exp_durations) / len(self.exp_durations)
            eta = timestr(n_scheduled * mean_duration / n_cores, False)
            # Print summary
            logger.info(
                'SUMMARY | Completed: %d, Failed: %d, Scheduled: %d, ETA: %s',
                self.n_success, self.n_fail, n_scheduled, eta)
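experiment_callback above relies on a specific return contract from run_scenario: None when the experiment raised, otherwise a (params, results, duration) tuple whose inputs match the apply_async call in run(). The sketch below illustrates that contract only; its body and the execute_experiment helper are hypothetical placeholders, not the project's simulation code.

import logging
import time

logger = logging.getLogger('orchestration')


def execute_experiment(settings, experiment):
    # Hypothetical stand-in for the actual simulation routine
    return {'experiment': experiment}


def run_scenario(settings, experiment, seq, n_exp):
    """Sketch of the contract experiment_callback expects: return None on
    failure, (params, results, duration) on success."""
    start = time.time()
    try:
        results = execute_experiment(settings, experiment)
    except Exception:
        logger.error('Experiment %d/%d failed', seq, n_exp)
        return None
    return experiment, results, time.time() - start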
Example #4
class Orchestrator(object):
    """Orchestrator.
    
    It is responsible for orchestrating the execution of all experiments and
    aggregating results.
    """

    def __init__(self, settings, summary_freq=4):
        """Constructor
        
        Parameters
        ----------
        settings : Settings
            The settings of the simulator
        summary_freq : int
            Frequency (in number of experiments) at which summary messages
            are displayed
        """
        self.settings = settings
        self.results = ResultSet()
        self.seq = SequenceNumber()
        self.exp_durations = collections.deque(maxlen=30)
        self.n_success = 0
        self.n_fail = 0
        self.summary_freq = summary_freq
        self._stop = False
        if self.settings.PARALLEL_EXECUTION:
            self.pool = mp.Pool(settings.N_PROCESSES)
    
    def stop(self):
        """Stop the execution of the orchestrator
        """
        logger.info('Orchestrator is stopping')
        self._stop = True
        if self.settings.PARALLEL_EXECUTION:
            self.pool.terminate()
            self.pool.join()
    
    def run(self):
        """Run the orchestrator.
        
        This call is blocking, whether multiple processes are used or not. This
        method returns only after all experiments are executed.
        """
        # Create queue of experiment configurations
        queue = collections.deque(self.settings.EXPERIMENT_QUEUE)
        # Calculate number of experiments and number of processes
        self.n_exp = len(queue) * self.settings.N_REPLICATIONS 
        self.n_proc = self.settings.N_PROCESSES \
                      if self.settings.PARALLEL_EXECUTION \
                      else 1
        logger.info('Starting simulations: %d experiments, %d process(es)' 
                    % (self.n_exp, self.n_proc))
        
        if self.settings.PARALLEL_EXECUTION:
            # This job queue is used only to keep track of which jobs have
            # finished and which are still running. Currently this information
            # is used only to handle keyboard interrupts correctly
            job_queue = collections.deque()
            # Schedule experiments from the queue
            while queue:
                experiment = queue.popleft()
                for _ in range(self.settings.N_REPLICATIONS):
                    job_queue.append(self.pool.apply_async(run_scenario,
                            args=(self.settings, experiment,
                                  self.seq.assign(), self.n_exp),
                            callback=self.experiment_callback))
            self.pool.close()
            # This solution is probably not optimal, but at least makes
            # KeyboardInterrupt work fine, which is crucial if launching the
            # simulation remotely via screen.
            # What happens here is that we keep waiting for possible
            # KeyboardInterrupts till the last process terminates successfully.
            # We may have to wait up to 5 seconds after the last process
            # terminates before exiting, which is really negligible
            try:
                while job_queue:
                    job = job_queue.popleft()
                    while not job.ready():
                        time.sleep(5)
            except KeyboardInterrupt:
                self.pool.terminate()
            self.pool.join()
        
        else: # Single-process execution
            while queue:
                experiment = queue.popleft()
                for _ in range(self.settings.N_REPLICATIONS):
                    self.experiment_callback(run_scenario(self.settings, 
                                            experiment, self.seq.assign(),
                                            self.n_exp))
                    if self._stop:
                        self.stop()

        logger.info('END | Planned: %d, Completed: %d, Succeeded: %d, Failed: %d', 
                    self.n_exp, self.n_fail + self.n_success, self.n_success, self.n_fail)
        

    def experiment_callback(self, args):
        """Callback method called by run_scenario
        
        Parameters
        ----------
        args : tuple
            Tuple of arguments
        """
        # If args is None, that means that an exception was raised during the
        # execution of the experiment. In such case, ignore it
        if not args:
            self.n_fail += 1
            return
        # Extract parameters
        params, results, duration = args
        self.n_success += 1
        # Store results
        self.results.add(params, results)
        self.exp_durations.append(duration)
        if self.n_success % self.summary_freq == 0:
            # Number of experiments scheduled to be executed
            n_scheduled = self.n_exp - (self.n_fail + self.n_success)
            # Compute ETA
            n_cores = min(mp.cpu_count(), self.n_proc)
            mean_duration = sum(self.exp_durations)/len(self.exp_durations)
            eta = timestr(n_scheduled*mean_duration/n_cores, False)
            # Print summary
            logger.info('SUMMARY | Completed: %d, Failed: %d, Scheduled: %d, ETA: %s', 
                        self.n_success, self.n_fail, n_scheduled, eta)
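A minimal usage sketch of the class above, assuming a hypothetical Settings object that exposes the attributes Orchestrator reads; the attribute values and the experiment dictionary are illustrative only.

settings = Settings()                   # hypothetical construction
settings.PARALLEL_EXECUTION = False     # single-process run for simplicity
settings.N_PROCESSES = 1                # only read when PARALLEL_EXECUTION is True
settings.N_REPLICATIONS = 2
settings.EXPERIMENT_QUEUE = [dict(alpha=0.8, strategy_name='LCE')]

orchestrator = Orchestrator(settings, summary_freq=2)
orchestrator.run()                      # blocks until all experiments finish
print(orchestrator.results.json())      # aggregated (params, results) pairs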
Example #5
class Orchestrator(object):
    """Orchestrator.
    
    It is responsible for orchestrating the execution of all experiments and
    aggregating results.
    """

    def __init__(self, settings, summary_freq=4):
        """Constructor
        
        Parameters
        ----------
        settings : Settings
            The settings of the simulator
        summary_freq : int
            Frequency (in number of experiments) at which summary messages
            are displayed
        """
        self.settings = settings
        self.results = ResultSet()
        self.seq = SequenceNumber()
        self.exp_durations = collections.deque(maxlen=30)
        self.summary_freq = summary_freq
        self._stop = False
        if settings.PARALLEL_EXECUTION:
            self.pool = mp.Pool(settings.N_PROCESSES)
    
    def stop(self):
        """Stop the execution of the orchestrator
        """
        logger.info('Orchestrator is stopping')
        self._stop = True
        if self.settings.PARALLEL_EXECUTION:
            self.pool.terminate()
            self.pool.join()
    
    def run(self):
        """Run the orchestrator.
        
        This call is blocking, whether multiple processes are used or not. This
        method returns only after all experiments are executed.
        """
        # Create queue of experiment configurations
        if 'EXPERIMENT_QUEUE' in self.settings and self.settings.EXPERIMENT_QUEUE:
            queue = collections.deque(self.settings.EXPERIMENT_QUEUE)
        else:
            queue = collections.deque()
            n_contents = self.settings.N_CONTENTS
            for topology_name in self.settings.TOPOLOGIES:
                for network_cache in self.settings.NETWORK_CACHE:
                    for alpha in self.settings.ALPHA:
                        for strategy_name in self.settings.STRATEGIES:
                            params = dict(alpha=alpha,
                                          topology_name=topology_name,
                                          network_cache=network_cache,
                                          strategy_name=strategy_name,
                                          n_contents=n_contents,
                                          strategy_params={})
                            queue.append(params)
        # Calculate number of experiments and number of processes
        self.n_exp = len(queue) * self.settings.N_REPLICATIONS 
        self.n_proc = self.settings.N_PROCESSES \
                      if self.settings.PARALLEL_EXECUTION \
                      else 1
        logger.info('Starting simulations: %d experiments, %d process(es)' 
                    % (self.n_exp, self.n_proc))
        # Schedule experiments from the queue
        while queue:
            experiment = queue.popleft()
            for _ in range(self.settings.N_REPLICATIONS):
                if self.settings.PARALLEL_EXECUTION:
                    last_job = self.pool.apply_async(run_scenario,
                            args=(self.settings, experiment,
                                  self.seq.assign(), self.n_exp),
                            callback=self.experiment_callback)
                else:
                    self.experiment_callback(run_scenario(self.settings, 
                                    experiment, self.seq.assign(),
                                    self.n_exp))
                if self._stop:
                    self.stop()
                                
        # If parallel execution, wait for all processes to terminate
        if self.settings.PARALLEL_EXECUTION:
            self.pool.close()
            # This solution is not optimal, but at least makes KeyboardInterrupt
            # work fine, which is crucial if launching the simulation remotely
            # via screen.
            # What happens here is that we keep waiting for possible
            # KeyboardInterrupts till the last scheduled process terminates
            # successfully. The last scheduled process is not necessarily the last
            # finishing one but nothing bad is going to happen in such case, it
            # will only not be possible interrupting the simulation between the
            # last scheduled process ends and the last ending process ends, which
            # is likely a matter of seconds.
            try:
                while not last_job.ready():
                    time.sleep(5)
            except KeyboardInterrupt:
                self.pool.terminate()
                self.pool.join()
                return
            self.pool.join()
    
    
    def experiment_callback(self, args):
        """Callback method called by run_scenario
        
        Parameters
        ----------
        args : tuple
            Tuple of arguments
        """
        # Extract parameters
        params, results, seq, duration = args
        # Store results
        self.results.add(params, results)
        self.exp_durations.append(duration)
        if seq % self.summary_freq == 0:
            # Number of experiments scheduled to be executed
            n_scheduled = self.n_exp - seq
            # Compute ETA
            n_cores = min(mp.cpu_count(), self.n_proc)
            mean_duration = sum(self.exp_durations)/len(self.exp_durations)
            eta = timestr(n_scheduled*mean_duration/n_cores, False)
            # Print summary
            logger.info('SUMMARY | Completed: %d, Scheduled: %d, ETA: %s', 
                        seq, n_scheduled, eta)
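Unlike the earlier versions, the callback here unpacks four values and performs no failure check, so this variant assumes run_scenario always returns a (params, results, seq, duration) tuple. A hypothetical sketch of that contract:

import time


def run_scenario(settings, experiment, seq, n_exp):
    # Hypothetical sketch of the four-value contract this variant's
    # experiment_callback unpacks; the body is a placeholder, not project code
    start = time.time()
    results = {'experiment': experiment}
    return experiment, results, seq, time.time() - start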