Example #1
def run_tests():
    """
    Run the basic test files for the simulator backend from the /topo/custom/tests folder.
    Note that this code is partially obsolete; dig into the code for details. Also
    note that test_routing_with_overutil.py and test_changepath_stacked.py are expected
    to fail at the moment (they represent corner cases that are currently not properly supported).
    """
    success = []
    failure = []

    # get all test files
    testfiles = []
    blacklist = ["__init__.py", "testutil.py"]
    testdir = os.path.join(os.getcwd(), "topo", "custom", "tests")
    for root, dirs, files in os.walk(testdir):
        for file in files:
            if file in blacklist: continue
            if file.endswith(".py"):
                testfiles.append(file)
    logger.info("tests: %s" % ",".join(testfiles))

    # run them one by one and print a summary afterwards
    for testname in testfiles:
        # note that print() is used here because the output should go to stdout
        print("+------------------------+")
        print("| %s" % testname)
        print("+------------------------+")
        ctx = Context()
        topo = Topology(ctx, filename='custom/tests/%s' % testname)
        try:
            sim = Simulator(ctx)
            errors = sim.run()
        except Exception as e:
            failure.append((testname, ['test crashed: %s' % str(e)]))
            continue
        if errors is None:
            # if the simulator did not return an array, the
            # on_test_finished method is not implemented properly
            failure.append(
                (testname,
                 ['test did not return an array, prob. not implemented']))
            continue
        if len(errors) == 0:
            success.append(testname)
        else:
            failure.append((testname, errors))

    logger.info("+------ Test Results ----+")
    for test in success:
        logger.info("| %30s PASSED" % test)
    if len(failure) > 0: logger.info("|--->")
    for test, errors in failure:
        logger.info("| %30s FAILED" % test)
    logger.info("+------------------------+")
    if len(failure) == 0:
        logger.info("| ALL TESTS PASSED")
        logger.info("+------------------------+")
Example #2
    def setUp(self):
        self.history = 4

        with open('./data/replay_buffer.pkl', 'rb') as file:
            self.replay_buffer = pickle.load(file)

        env_to_use = 'Pendulum-v0'
        env = gym.make(env_to_use)
        self.simulator = Simulator(self.history, env)
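The pickled replay buffer loaded here is later indexed as replay_buffer[i][0] through [3] (see Example #13), i.e. each entry is an (observation, action, next_observation, reward) tuple. A toy buffer with that layout, assuming Pendulum-v0's 3-dimensional observations and 1-dimensional actions, could be generated as below; the file path matches the one above, everything else is illustrative.

import pickle
import numpy as np

# toy replay buffer in the (obs, action, next_obs, reward) layout the tests index
toy_buffer = [(np.zeros(3), np.zeros(1), np.zeros(3), np.zeros(1))
              for _ in range(500)]
with open('./data/replay_buffer.pkl', 'wb') as f:
    pickle.dump(toy_buffer, f)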
Example #3
class MatcherTestCase(unittest.TestCase):
    """Tests the various Matcher subclasses"""

    all_matchers = [GeogMatcher]

    def setUp(self):
        self.sim = Simulator()
        self.sim.simulate(10, 13579)

    def test_matcher(self):
        """Test various matcher subclasses"""
        def test_empty_matcher(MatcherSub):
            """Test empty Matcher returns valid empty results"""
            ## Test that an empty set returns valid, empty results
            matcher_sub = MatcherSub(set([]))
            unmatched = matcher_sub.get_unmatched()
            best_matches = matcher_sub.get_best_matches()
            self.assertEqual(unmatched, set([]))
            self.assertEqual(best_matches, set([]))

        def test_matcher_matches(MatcherSub, expected_matches, expected_unmatches):
            """Spin up Matcher subclass and confirm expected results"""
            users = self.sim.get_users()
            self.assertEqual(len(users), 10, "Sanity check simulator user count")
            matcher_sub = MatcherSub(users, seed=1234)
            best_matches = set([(tup[0].user_id, tup[1].user_id)
                                for tup in matcher_sub.get_best_matches()])
            unmatched = set([user.user_id for user in matcher_sub.get_unmatched()])
            print(best_matches)
            print(unmatched)

            for match in expected_matches:
                self.assertIn(match, best_matches, "Expected match not found - perhaps the algo or simulator changed?")

            for unmatch in expected_unmatches:
                self.assertIn(unmatch, unmatched, "Expected unmatched not found - perhaps the algo or simulator changed?")

        ## Test empty matchers return valid results
        for MatcherSub in self.all_matchers:
            test_empty_matcher(MatcherSub)

        ## For each Matcher subclass, work out what the answers should be
        ## given the simulator's users, and record the answers here
        geog_matched = [(7736498, 2837615), (5072324, 795755), (4454245, 3882723)]
        geog_unmatched = [1510961, 8148259, 6505708, 2432669]

        ## Feed the expected results into 3-tuples below to be tested
        ## (MatcherSubclass, expected_match_results, expected_unmatched_results)
        test_cases = [
            (GeogMatcher, geog_matched, geog_unmatched)
        ]

        ## Test full matchers return correct results
        for match_case in test_cases:
            test_matcher_matches(*match_case)
Example #4
    def setUp(self):
        """
        Test Preparation

        The simulator should deliver the time change signal in intervals of
        one second divided by TEST_SPEED.
        Since the simulator delivers events through the QtCore event loop, we
        need to set up a Qt application.

        """
        self.app = QtCore.QCoreApplication([])
        self.history = []
        self.time_step = 1 / TEST_SPEED
        self.simulator = Simulator(self._callback)
        # state variables
        self.simulation_time = None
        self.t_elapsed = 0
Example #5
    def get_users(self):
        """Read in the survey data or simulate new data"""
        self.users = self.get_users_parser(APIParser, self.survey_api_survey_id)
        print(self.users)
        if self.users is None:
            if os.path.isfile(self.survey_data_filename):
                self.users = self.get_users_parser(TextParser, self.survey_data_filename)
            else:
                print("Simulating data")
                ## Simulate some number of survey responses
                sim = Simulator()
                sim.simulate(200) ## By adding a seed parameter, you can have reproducible sims
                self.users = sim.get_users()

    #    ## Print some summaries
    #    for user in users:
    #        user.to_string()

        # Print some descriptive stats
        print("# Users", len(self.users), [u.name for u in self.users])
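The inline comment above notes that passing a seed makes the simulated data reproducible; the test cases further down (see Example #12) pass it as the second positional argument, for example:

# reproducible simulation (the seed value is arbitrary here)
sim = Simulator()
sim.simulate(200, 1234)   # same count and seed -> same simulated users
users = sim.get_users()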
Example #6
    def setUp(self):
        self.sim = Simulator()
        self.sim.simulate(10, 13579)
Example #7
def main():

    # -w (run preprocessor write)
    if args.run_preprocessor:
        run_preprocessor()
        exit(0)

    # -r (run preprocessor read)
    if args.run_preprocessor_read:
        run_preprocessor_read()
        exit(0)

    # -a (run aggregated plotter)
    if args.aggregated_plots:
        if not args.plotters:
            logger.error("The -u option is required for -a. Exit now.")
            exit(1)
        if args.plotters == 'util/agg_preprocessor.py':
            args.run_preprocessor = args.aggregated_plots
            run_preprocessor()
            exit(0)
        run_aggregated_plotter()
        exit(0)

    # -p and -u (run plotter from pickle file)
    if args.picklefile:
        run_plotter()
        exit(0)

    # -t (run tests)
    if args.run_tests:
        run_tests()
        exit(0)

    # make sure the file that should be run exists
    if not args.filename:
        logger.error('The -f option is required. Exit now.')
        exit(1)

    try:
        # TODO: currently, filenames are relative to the topo folder
        topo_folder = os.path.join(os.getcwd(), 'topo')
        filename = args.filename.strip()
        if not filename.endswith('.py'): filename = filename + '.py'

        # make sure the file that should be run exists
        if not os.path.exists(os.path.join(topo_folder, filename)):
            scripts = get_scripts(topo_folder)
            print(scripts)
            for script in scripts:
                if filename == os.path.basename(script):
                    logger.info('use file: %s' % script)
                    filename = script
                    break
            else:
                logger.error('The file specified via -f was not found: %s' %
                             str(filename))
                # fallback to default
                topo_folder = os.path.join(os.getcwd(), 'topo')
                filename = os.path.join(topo_folder, 'custom/pbce/exp800-2.py')
                logger.error('Using fallback: %s' % filename)
                if not os.path.exists(os.path.join(topo_folder, filename)):
                    logger.error('Fallback failed')
                    exit(1)

        # create a new experiment context that stores all the data; this is
        # the central object that is basically injected everywhere from now on; try not
        # to add anything here except raw objects (i.e., no functions) due to
        # serialization issues
        ctx = Context()
        ctx.started = time.time()
        timer = Timer(ctx, 'main')
        timer.start('total_time')

        # enforce specific seed (stored in ctx)
        if args.seed:
            logger.info('Global seed set to %d via CLI' % int(args.seed))
            ctx.seed = int(args.seed)

        # only run the scenario generator (neither the simulator nor the DTS/RSA algorithms
        # are executed)
        if args.run_scenario_generator:
            logger.info(
                'Note: -g flag is set, i.e., the scenario generator ctx flag is active and the simulator will not be executed'
            )
            ctx.run_scenario_generator = True

        # inject config from configfile into ctx
        # this is useful if the tool is used in an automated fashion
        if args.config:
            with open(args.config, 'r') as file:
                config = json.loads(file.read())
                ctx.configfile = args.config
                ctx.config = config
                #logger.info(str(config) + " " + args.filename)

        # create topology
        topo = Topology(ctx, filename=filename)

        if args.run_scenario_generator:
            logger.info(
                'Run scenario generator instead of simulator (-g flag was set)'
            )
            ctx.statistics['sim.skipped'] = 1

        # finally run the simulator if requested
        if not ctx.skip_main_simulation:
            sim = Simulator(ctx)
            sim.run()

        # print the statistics
        print_statistics(ctx)

        # save the aggregated statistics (statistics.json)
        if ctx.configfile:
            statistics_file = os.path.join(os.path.dirname(ctx.configfile),
                                           'statistics.json')
            with open(statistics_file, 'w') as file:
                file.write(json.dumps(ctx.statistics))

        # saves a pickle result file from ctx to access all raw data later on
        # (skipped if -n flag is used)
        if not args.nopickle:
            if ctx.scenario is not None:
                timer.stop()
                return
                #raise Exception("ctx.scenario is set -> not possible to create a pickle file (-n option is mandatory here!")
            if ctx.configfile:
                sys.setrecursionlimit(100000)
                result_file = os.path.join(os.path.dirname(ctx.configfile),
                                           'result.pickle')
                pickle.dump(ctx,
                            open(result_file, "wb"),
                            protocol=pickle.HIGHEST_PROTOCOL)

        # deprecated
        if args.plotters:
            logger.info("run plotters..")
            plotters = args.plotters.split(',')
            # handle plotters
            for plotter in plotters:
                modulepath = "plotter.%s" % plotter.replace('.py', '').replace(
                    os.sep, '.')
                modulepath = modulepath.replace('..', '.')
                logger.info("loading plotter: %s" % modulepath)
                importlib.import_module(modulepath).plot(ctx)

        timer.stop()

    except TimeoutError as e:
        timer.stop()
        # maximum time has exceeded
        logger.info("Timelimit exceeded, abort")
        # create statistics file (this is not technically an error, some experiments are just
        # running too long)
        if ctx.configfile:
            statistics_file = os.path.join(os.path.dirname(ctx.configfile),
                                           'statistics.json')
            ctx.statistics['hit_timelimit'] = time.time() - ctx.started
            print_statistics(ctx)
            with open(statistics_file, 'w') as file:
                file.write(json.dumps(ctx.statistics))
        # still create an error message for quick checks
        exc_string = traceback.format_exc()
        if ctx.configfile:
            error_file = os.path.join(os.path.dirname(ctx.configfile),
                                      'timeout-error.txt')
            with open(error_file, 'w') as file:
                file.write(exc_string)
        # finally print the exception and exit
        print("Exception:")
        print('-' * 60)
        print(exc_string)
        print('-' * 60)
        exit(0)

    except Exception as e:
        timer.stop()
        print("Exception:")
        print('-' * 60)
        exc_string = traceback.format_exc()
        print(exc_string)
        print('-' * 60)
        # save the aggregated statistics (statistics.json)
        if ctx.configfile:
            error_file = os.path.join(os.path.dirname(ctx.configfile),
                                      'error.txt')
            with open(error_file, 'w') as file:
                file.write(exc_string)
        raise
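The plotter-loading loop near the end of main() turns a relative file name such as util/agg_preprocessor.py into a dotted module path below the plotter package before importing it. A small stand-alone illustration of that conversion (the helper name is made up, and the example assumes a POSIX os.sep of '/'):

import os

def to_plotter_module(plotter):
    # 'util/agg_preprocessor.py' -> 'plotter.util.agg_preprocessor'
    modulepath = "plotter.%s" % plotter.replace('.py', '').replace(os.sep, '.')
    return modulepath.replace('..', '.')

print(to_plotter_module('util/agg_preprocessor.py'))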
Example #8
class MatcherTestCase(unittest.TestCase):
    """Tests the various Matcher subclasses"""

    all_matchers = [GeogMatcher]

    def setUp(self):
        self.sim = Simulator()
        self.sim.simulate(10, 13579)

    def test_matcher(self):
        """Test various matcher subclasses"""
        def test_empty_matcher(MatcherSub):
            """Test empty Matcher returns valid empty results"""
            ## Test that an empty set returns valid, empty results
            matcher_sub = MatcherSub(set([]))
            unmatched = matcher_sub.get_unmatched()
            best_matches = matcher_sub.get_best_matches()
            self.assertEqual(unmatched, set([]))
            self.assertEqual(best_matches, set([]))

        def test_matcher_matches(MatcherSub, expected_matches,
                                 expected_unmatches):
            """Spin up Matcher subclass and confirm expected results"""
            users = self.sim.get_users()
            self.assertEqual(len(users), 10,
                             "Sanity check simulator user count")
            matcher_sub = MatcherSub(users, seed=1234)
            best_matches = set([(tup[0].user_id, tup[1].user_id)
                                for tup in matcher_sub.get_best_matches()])
            unmatched = set(
                [user.user_id for user in matcher_sub.get_unmatched()])
            print(best_matches)
            print(unmatched)

            for match in expected_matches:
                self.assertIn(
                    match, best_matches,
                    "Expected match not found - perhaps the algo or simulator changed?"
                )

            for unmatch in expected_unmatches:
                self.assertIn(
                    unmatch, unmatched,
                    "Expected unmatched not found - perhaps the algo or simulator changed?"
                )

        ## Test empty matchers return valid results
        for MatcherSub in self.all_matchers:
            test_empty_matcher(MatcherSub)

        ## For each Matcher subclass, work out what the answers should be
        ## given the simulator's users, and record the answers here
        geog_matched = [(7736498, 2837615), (5072324, 795755),
                        (4454245, 3882723)]
        geog_unmatched = [1510961, 8148259, 6505708, 2432669]

        ## Feed the expected results into 3-tuples below to be tested
        ## (MatcherSubclass, expected_match_results, expected_unmatched_results)
        test_cases = [(GeogMatcher, geog_matched, geog_unmatched)]

        ## Test full matchers return correct results
        for match_case in test_cases:
            test_matcher_matches(*match_case)
Example #9
    def setUp(self):
        self.sim = Simulator()
        self.sim.simulate(10, 13579)
Example #10
    def test_blank_simulator(self):
        blankSim = Simulator()
        self.assertEqual(blankSim.get_users(), set([]))
        self.assertEqual(blankSim.CAREER_STAGES,
                         {"Student": 10, "Entry": 15, "Early": 25, "Middle": 40, "Late": 10})
Example #11
    def setUp(self):
        self.sim = Simulator()
Example #12
class SimulatorTestCase(unittest.TestCase):
    def setUp(self):
        self.sim = Simulator()

    def tearDown(self):
        self.sim = None

    def test_simulator(self):
        self.assertEqual(self.sim.get_users(), set([]))
        self.assertEqual(self.sim.CAREER_STAGES,
                         {"Student": 10, "Entry": 15, "Early": 25, "Middle": 40, "Late": 10})

    def test_simulate(self):
        """Tests the simulate and clear_users methods
        
        Multiple calls to simulate keep growing the user group, and the
        test demonstrates that the group grows. It also verifies that
        the users set can be cleared.
        """

        ## Simulate the first 5 users
        self.sim.simulate(5, 1234)
        users = list(sorted(
            self.sim.get_users(),
            key=lambda u: u.user_id
            ))
        print(users)
        expected_results = {
            4 : (8698081,"Melinda Thompson"),
            3 : (8453420, "Freddie Bahlmann"),
            2 : (8198783, "Gary Paddock"),
            1 : (3966593, "Kevin Wilson"),
            0 : (67423, "Scott Lopez")
        }
        for i in range(5):
            self.assertEqual((users[i].user_id, users[i].name), expected_results[i])

        ## Clear the simulated users, and resimulate the same users
        self.sim.clear_users()
        self.sim.simulate(5, 1234)
        users = list(sorted(
            self.sim.get_users(),
            key=lambda u: u.user_id
            ))
        print(users)
        expected_results = {
            4 : (8698081,"Melinda Thompson"),
            3 : (8453420, "Freddie Bahlmann"),
            2 : (8198783, "Gary Paddock"),
            1 : (3966593, "Kevin Wilson"),
            0 : (67423, "Scott Lopez")
        }
        for i in range(5):
            self.assertEqual((users[i].user_id, users[i].name), expected_results[i])

        ## Add 5 more users on top of the existing users.
        ## Since the same seed is used, the simulator would otherwise
        ## duplicate the previous users, but the simulate method
        ## should prevent this from happening
        self.sim.simulate(5, 1234)
        users = list(sorted(
            self.sim.get_users(),
            key=lambda u: u.user_id
            ))
        print(users)
        expected_results = {
            9 : (8698081,"Melinda Thompson"),
            8 : (8698080, "Angela Jones"),
            7 : (8453420, "Freddie Bahlmann"),
            6 : (8453419, "Marilynn Orr"),
            5 : (8198783, "Gary Paddock"),
            4 : (8198781, "Tiffany Mchaney"),
            3 : (3966593, "Kevin Wilson"),
            2 : (3966592, "Donna Yu"),
            1 : (67424, "Patrice Newell"),
            0 : (67423, "Scott Lopez")
        }
        for i in range(10):
            self.assertEqual((users[i].user_id, users[i].name), expected_results[i])

    def test_blank_simulator(self):
        blankSim = Simulator()
        self.assertEqual(blankSim.get_users(), set([]))
        self.assertEqual(blankSim.CAREER_STAGES,
                         {"Student": 10, "Entry": 15, "Early": 25, "Middle": 40, "Late": 10})
Example #13
class MyTestCase(unittest.TestCase):
    """
    Tests the replay-buffer handling of the Simulator
    """
    def setUp(self):
        self.history = 4

        with open('./data/replay_buffer.pkl', 'rb') as file:
            self.replay_buffer = pickle.load(file)

        env_to_use = 'Pendulum-v0'
        env = gym.make(env_to_use)
        self.simulator = Simulator(self.history, env)

    def test_format_buffer(self):
        # Separate into prev_observations
        # previous actions
        # next observations
        # and reward for next observation
        p_obs, p_a, n_o, p_r = self.simulator._format_buffer(
            self.replay_buffer)

        # Check prev Observations
        first_obs = np.array([
            self.replay_buffer[i][0] for i in range(self.history - 1, -1, -1)
        ]).reshape(-1)
        self.assertTrue(np.array_equal(first_obs, p_obs[0]))

        pos = 200
        second_obs = np.array([
            self.replay_buffer[i][0]
            for i in range(pos + self.history - 1, pos - 1, -1)
        ]).reshape(-1)

        self.assertTrue(np.array_equal(second_obs, p_obs[197]))

        pos = 400
        third_obs = np.array([
            self.replay_buffer[i][0]
            for i in range(pos + self.history - 1, pos - 1, -1)
        ]).reshape(-1)
        self.assertTrue(np.array_equal(third_obs, p_obs[394]))

        # Check prev Actions
        first_act = np.array([
            self.replay_buffer[i][1] for i in range(self.history - 1, -1, -1)
        ]).reshape(-1)
        self.assertTrue(np.array_equal(first_act, p_a[0]))

        pos = 200
        second_act = np.array([
            self.replay_buffer[i][1]
            for i in range(pos + self.history - 1, pos - 1, -1)
        ]).reshape(-1)

        self.assertTrue(np.array_equal(second_act, p_a[197]))

        pos = 400
        third_act = np.array([
            self.replay_buffer[i][1]
            for i in range(pos + self.history - 1, pos - 1, -1)
        ]).reshape(-1)
        self.assertTrue(np.array_equal(third_act, p_a[394]))

        # Check next observation
        first_no = self.replay_buffer[self.history - 1][2]
        print(first_no.shape, n_o[0].shape)
        self.assertTrue(np.array_equal(first_no, n_o[0]))

        pos = 200
        second_no = self.replay_buffer[pos + self.history - 1][2]
        self.assertTrue(np.array_equal(second_no, n_o[197]))

        pos = 400
        third_no = self.replay_buffer[pos + self.history - 1][2]
        self.assertTrue(np.array_equal(third_no, n_o[394]))

        # Check reward
        first_r = self.replay_buffer[self.history - 1][3]
        print(first_r.shape, p_r[0].shape)
        self.assertTrue(np.array_equal(first_r, p_r[0]))

        pos = 200
        second_r = self.replay_buffer[pos + self.history - 1][3]
        self.assertTrue(np.array_equal(second_r, p_r[197]))

        pos = 400
        third_r = self.replay_buffer[pos + self.history - 1][3]
        self.assertTrue(np.array_equal(third_r, p_r[394]))

    def test_train(self):
        pass
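The assertions in test_format_buffer pin down the layout that _format_buffer is expected to produce: the self.history most recent observations and actions, newest first, flattened into one row, plus the next observation and reward of the newest step. A stand-alone helper with that behaviour, written only to illustrate the expected layout (it is not the actual Simulator method):

import numpy as np

def format_window(replay_buffer, history, pos):
    # one row as the assertions expect it: observations/actions of the
    # `history` most recent steps (newest first), flattened, plus the next
    # observation and reward of the newest step
    prev_obs = np.array([replay_buffer[i][0]
                         for i in range(pos + history - 1, pos - 1, -1)]).reshape(-1)
    prev_act = np.array([replay_buffer[i][1]
                         for i in range(pos + history - 1, pos - 1, -1)]).reshape(-1)
    next_obs = replay_buffer[pos + history - 1][2]
    reward = replay_buffer[pos + history - 1][3]
    return prev_obs, prev_act, next_obs, reward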
Example #14
class BasicOperation(unittest.TestCase):
    """ Basic operation of the simulator """
    def setUp(self):
        """
        Test Preparation

        The simulator should deliver the time change signal in intervals of
        one second divided by TEST_SPEED.
        Since the simulator delivers events through the QtCore event loop, we
        need to set up a Qt application.

        """
        self.app = QtCore.QCoreApplication([])
        self.history = []
        self.time_step = 1 / TEST_SPEED
        self.simulator = Simulator(self._callback)
        # state variables
        self.simulation_time = None
        self.t_elapsed = 0

    def _callback(self, t):
        self.simulation_time = t

    def _create_time_range(self, seconds):
        start = datetime(2013, 12, 4, 9)
        end = start + timedelta(seconds=seconds)
        return start, end

    def _step_time(self):
        """ a helper function that increases the time step """
        sleep(self.time_step)
        self.t_elapsed += self.time_step

    def test_delivery(self):
        """ Tests signal delivery by the simulator """
        # Simulate a time range of 2 seconds
        duration = 2
        # Allow a bit of leeway for the expected simulation end time
        max_t = (duration + 1.5) / TEST_SPEED
        min_t = (duration - 0.2) / TEST_SPEED
        t_range = self._create_time_range(duration)
        self.simulator.configure(t_range, TEST_SPEED)
        self.simulator.start()

        while (self.t_elapsed < max_t
               and (self.simulation_time is None
                    or self.simulation_time < t_range[1])):
            self.app.processEvents()
            self._step_time()
        self.assertLess(
            self.t_elapsed, max_t,
            'Events were not delivered in time (' + str(self.t_elapsed) + ')')
        self.assertGreater(self.t_elapsed, min_t, 'Event delivery too fast')

    def test_start_with_external_signal(self):
        signal_emitter = SignalEmitter()
        t_range = self._create_time_range(3600)
        start_time = t_range[0]
        dt = timedelta(seconds=1800)
        self.simulator.configure(t_range, 0, signal_emitter.test_signal, dt)
        self.simulator.start()
        self.app.processEvents()
        self.assertEqual(self.simulation_time, start_time + dt,
                         'First step was not executed immediately')
        signal_emitter.test_signal.emit()
        self.app.processEvents()
        self.assertEqual(self.simulation_time, start_time + 2 * dt,
                         'Simulator did not step on external signal')
        # The simulation should now be finished
        signal_emitter.test_signal.emit()
        self.app.processEvents()
        self.assertEqual(self.simulation_time, start_time + 2 * dt,
                         'Simulator has not ended as expected')

    def test_pause(self):
        """ Tests pausing the simulator """
        duration = 3
        max_t = 8 / TEST_SPEED
        min_t = 5 / TEST_SPEED
        t_range = self._create_time_range(duration)
        self.simulator.configure(t_range, TEST_SPEED)
        self.simulator.start()

        # No events should be delivered during the pause
        while (self.t_elapsed < max_t
               and (self.simulation_time is None
                    or self.simulation_time < t_range[1])):
            self.app.processEvents()
            if self.t_elapsed == 2 / TEST_SPEED:
                self.simulator.pause()
            elif self.t_elapsed == 5 / TEST_SPEED:
                self.simulator.start()
            self._step_time()
        self.assertLess(
            self.t_elapsed, max_t,
            'Events were not delivered in time (' + str(self.t_elapsed) + ')')
        self.assertGreater(self.t_elapsed, min_t, 'Event delivery too fast')

    def test_stop(self):
        """ Tests premature simulation stop """
        duration = 3
        max_t = 5 / TEST_SPEED
        t_range = self._create_time_range(duration)
        self.simulator.configure(t_range, TEST_SPEED)
        self.simulator.start()

        while self.t_elapsed < max_t:
            self.app.processEvents()
            if self.t_elapsed == 2 / TEST_SPEED:
                self.simulator.stop()
            self._step_time()
        self.assertLess(self.simulation_time, t_range[1])
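test_start_with_external_signal drives the simulator through a SignalEmitter that is not part of this snippet; a minimal sketch of such a helper, assuming a PyQt-style binding (the Qt module actually used by the project is not shown here):

from PyQt5 import QtCore  # assumption: the project may use PyQt4 or PySide instead

class SignalEmitter(QtCore.QObject):
    # bare Qt signal that the test emits manually to step the simulator
    test_signal = QtCore.pyqtSignal()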