Example #1
    def test_abort_on_initial_design(self, patch):
        def target(x):
            return 5

        # should raise an error if abort_on_first_run_crash is True
        patch.side_effect = FirstRunCrashedException()
        scen = Scenario({
            'cs': test_helpers.get_branin_config_space(),
            'run_obj': 'quality',
            'output_dir': 'data-test_smbo-abort',
            'abort_on_first_run_crash': True
        })
        self.output_dirs.append(scen.output_dir)
        smbo = SMAC4AC(scen, tae_runner=target, rng=1).solver
        self.assertRaises(FirstRunCrashedException, smbo.run)

        # should not raise an error if abort_on_first_run_crash is False
        patch.side_effect = FirstRunCrashedException()
        scen = Scenario({
            'cs': test_helpers.get_branin_config_space(),
            'run_obj': 'quality',
            'output_dir': 'data-test_smbo-abort',
            'abort_on_first_run_crash': False,
            'wallclock-limit': 1
        })
        self.output_dirs.append(scen.output_dir)
        smbo = SMAC4AC(scen, tae_runner=target, rng=1).solver

        try:
            smbo.start()
            smbo.run()
        except FirstRunCrashedException:
            self.fail('Raises FirstRunCrashedException unexpectedly!')
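
The `patch` argument above implies a `unittest.mock.patch` decorator on the test method that the excerpt does not show. A minimal, self-contained sketch of the same wiring, using a hypothetical FakeRunner class and a local stand-in exception instead of smac's real FirstRunCrashedException:

    import unittest
    from unittest import mock


    class FakeRunner:
        """Hypothetical stand-in for the target-algorithm executor."""

        def run(self, config):
            return 5


    class FirstRunCrashedException(Exception):
        """Local stand-in; the real class lives in smac."""


    class AbortExampleTest(unittest.TestCase):
        @mock.patch.object(FakeRunner, "run")
        def test_abort(self, patch):
            # Every call through the patched method now raises, mimicking a
            # crash of the very first target-algorithm run.
            patch.side_effect = FirstRunCrashedException()
            with self.assertRaises(FirstRunCrashedException):
                FakeRunner().run(None)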
Example #2
    def setUp(self):
        self.scenario = Scenario({
            'cs': test_helpers.get_branin_config_space(),
            'run_obj': 'quality',
            'output_dir': '',
            'runcount_limit': 1,
            'deterministic': True
        })
Example #3
    def setUp(self):
        self.scenario = Scenario({
            'cs': test_helpers.get_branin_config_space(),
            'run_obj': 'quality',
            'output_dir': 'data-test_smbo'
        })
        self.output_dirs = []
        self.output_dirs.append(self.scenario.output_dir)
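
The self.output_dirs bookkeeping suggests the suite removes these directories after each test. A hypothetical tearDown, assuming the collected directories are safe to delete recursively:

    import shutil
    import unittest


    class ExampleTest(unittest.TestCase):
        def setUp(self):
            self.output_dirs = ['data-test_smbo']  # collected as in the example

        def tearDown(self):
            # Hypothetical cleanup: remove every directory recorded during
            # setUp and the test body.
            for output_dir in self.output_dirs:
                if output_dir:
                    shutil.rmtree(output_dir, ignore_errors=True)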
Example #4
    def test_update_intensification_percentage(self):
        """
        This test checks that the intensification time bound is updated in
        subsequent iterations as long as the intensifier's num_run counter is
        not reset to zero.
        """
        def target(x):
            return 5

        scen = Scenario({
            'cs': test_helpers.get_branin_config_space(),
            'run_obj': 'quality',
            'output_dir': 'data-test_smbo-intensification'
        })
        self.output_dirs.append(scen.output_dir)
        solver = SMAC4AC(scen, tae_runner=target, rng=1).solver

        solver.stats.is_budget_exhausted = unittest.mock.Mock()
        solver.stats.is_budget_exhausted.side_effect = tuple(([False] * 10) +
                                                             [True] * 8)

        solver._get_timebound_for_intensification = unittest.mock.Mock(
            wraps=solver._get_timebound_for_intensification)

        class SideEffect:
            def __init__(self, intensifier, get_next_run):
                self.intensifier = intensifier
                self.get_next_run = get_next_run
                self.counter = 0

            def __call__(self, *args, **kwargs):
                self.counter += 1
                if self.counter % 4 == 0:
                    self.intensifier.num_run = 0
                return self.get_next_run(*args, **kwargs)

        solver.intensifier.get_next_run = unittest.mock.Mock(
            side_effect=SideEffect(solver.intensifier,
                                   solver.intensifier.get_next_run))

        solver.run()

        get_timebound_mock = solver._get_timebound_for_intensification
        self.assertEqual(get_timebound_mock.call_count, 6)
        self.assertFalse(get_timebound_mock.call_args_list[0][1]['update'])
        self.assertFalse(get_timebound_mock.call_args_list[1][1]['update'])
        self.assertTrue(get_timebound_mock.call_args_list[2][1]['update'])
        self.assertFalse(get_timebound_mock.call_args_list[3][1]['update'])
        self.assertTrue(get_timebound_mock.call_args_list[4][1]['update'])
        self.assertTrue(get_timebound_mock.call_args_list[5][1]['update'])

        self.assertGreater(get_timebound_mock.call_args_list[2][0][0],
                           get_timebound_mock.call_args_list[1][0][0])
        self.assertLess(get_timebound_mock.call_args_list[3][0][0],
                        get_timebound_mock.call_args_list[2][0][0])
        self.assertGreater(get_timebound_mock.call_args_list[4][0][0],
                           get_timebound_mock.call_args_list[3][0][0])
        self.assertGreater(get_timebound_mock.call_args_list[5][0][0],
                           get_timebound_mock.call_args_list[4][0][0])
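
The SideEffect helper above is a standard unittest.mock idiom: side_effect may be any callable, so an instance of a small class can keep state across calls while still delegating to the real implementation. A self-contained sketch of the pattern, with a toy add function standing in for get_next_run:

    import unittest.mock


    class CountingSideEffect:
        """Stateful side_effect: count calls, then delegate to the wrapped callable."""

        def __init__(self, wrapped):
            self.wrapped = wrapped
            self.counter = 0

        def __call__(self, *args, **kwargs):
            self.counter += 1
            return self.wrapped(*args, **kwargs)


    def add(a, b):
        return a + b


    effect = CountingSideEffect(add)
    mocked = unittest.mock.Mock(side_effect=effect)
    assert mocked(1, 2) == 3    # delegated to add()
    assert mocked(3, 4) == 7
    assert effect.counter == 2  # state survived across calls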
Example #5
    def get_smbo(intensification_perc):
        """Return SMBO with intensification_percentage."""
        scen = Scenario({
            'cs': test_helpers.get_branin_config_space(),
            'run_obj': 'quality',
            'output_dir': '',
            'intensification_percentage': intensification_perc
        })
        return SMAC(scen, tae_runner=target, rng=1).solver
Example #6
    def test_abort_on_initial_design(self, patch):
        def target(x):
            return 5

        patch.side_effect = FirstRunCrashedException()
        scen = Scenario({'cs': test_helpers.get_branin_config_space(),
                         'run_obj': 'quality', 'output_dir': '',
                         'abort_on_first_run_crash': 1})
        smbo = SMAC(scen, tae_runner=target, rng=1).solver
        self.assertRaises(FirstRunCrashedException, smbo.run)
Example #7
    def test_write(self):
        # The nulls make sure that we correctly emit the Python None value
        fixture = '{"data": [[[1, "branin", 1], [1, 1, {"__enum__": ' \
                  '"StatusType.SUCCESS"}, null]], ' \
                  '[[1, "branini", 1], [1, 1, {"__enum__": ' \
                  '"StatusType.SUCCESS"}, null]], ' \
                  '[[2, "branini", 1], [1, 1, {"__enum__": ' \
                  '"StatusType.SUCCESS"}, null]], ' \
                  '[[2, null, 1], [1, 1, {"__enum__": ' \
                  '"StatusType.SUCCESS"}, null]], ' \
                  '[[3, "branin-hoo", 1], [1, 1, {"__enum__": ' \
                  '"StatusType.SUCCESS"}, null]], ' \
                  '[[4, null, 1], [1, 1, {"__enum__": ' \
                  '"StatusType.SUCCESS"}, null]]],' \
                  '"configs": {' \
                  '"4": {"x": -2.2060968293349363, "y": 5.183410905645716}, ' \
                  '"3": {"x": -2.7986616377433045, "y": 1.385078921531967}, ' \
                  '"1": {"x": 1.2553300705386103, "y": 10.804867401632372}, ' \
                  '"2": {"x": -4.998284377739827, "y": 4.534988589477597}}}'

        run_history = RunHistory(aggregate_func=average_cost)
        configuration_space = test_helpers.get_branin_config_space()
        configuration_space.seed(1)

        config = configuration_space.sample_configuration()
        # Config on two instances
        run_history.add(config, 1, 1, StatusType.SUCCESS, seed=1,
                        instance_id='branin')
        run_history.add(config, 1, 1, StatusType.SUCCESS, seed=1,
                        instance_id='branini')
        config_2 = configuration_space.sample_configuration()
        # Another config on a known instance
        run_history.add(config_2, 1, 1, StatusType.SUCCESS, seed=1,
                        instance_id='branini')
        # Known Config on no instance
        run_history.add(config_2, 1, 1, StatusType.SUCCESS, seed=1)
        # New config on new instance
        config_3 = configuration_space.sample_configuration()
        run_history.add(config_3, 1, 1, StatusType.SUCCESS, seed=1,
                        instance_id='branin-hoo')
        # New config on no instance
        config_4 = configuration_space.sample_configuration()
        run_history.add(config_4, 1, 1, StatusType.SUCCESS, seed=1)

        pSMAC.write(run_history, self.tmp_dir, 20)

        output_filename = os.path.join(self.tmp_dir, '.runhistory_20.json')
        self.assertTrue(os.path.exists(output_filename))

        fixture = json.loads(fixture, object_hook=StatusType.enum_hook)
        with open(output_filename) as fh:
            output = json.load(fh, object_hook=StatusType.enum_hook)

        print(output)
        print(fixture)
        self.assertEqual(output, fixture)
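
The fixture serializes enum members as {"__enum__": "StatusType.SUCCESS"} and decodes them with StatusType.enum_hook. A self-contained sketch of how such an object_hook can work, using a local stand-in enum rather than smac's real StatusType:

    import json
    from enum import Enum


    class StatusType(Enum):
        """Local stand-in; the member value here is illustrative only."""
        SUCCESS = 1

        @staticmethod
        def enum_hook(obj):
            # Turn {"__enum__": "StatusType.SUCCESS"} back into the enum member.
            if "__enum__" in obj:
                _, member = obj["__enum__"].split(".")
                return getattr(StatusType, member)
            return obj


    payload = '{"status": {"__enum__": "StatusType.SUCCESS"}}'
    decoded = json.loads(payload, object_hook=StatusType.enum_hook)
    assert decoded["status"] is StatusType.SUCCESS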
Example #8
    def setUp(self):
        self.scenario = Scenario({
            "cs": test_helpers.get_branin_config_space(),
            "run_obj": "quality",
            "output_dir": "data-test_epmchooser",
            "deterministic": False,
            "limit_resources": True,
        })
        self.output_dirs = []
        self.output_dirs.append(self.scenario.output_dir)
Example #9
    def get_smbo(intensification_perc):
        """Return SMBO with intensification_percentage."""
        scen = Scenario({
            "cs": test_helpers.get_branin_config_space(),
            "run_obj": "quality",
            "output_dir": "data-test_smbo-intensification",
            "intensification_percentage": intensification_perc,
            "deterministic": False,
            "limit_resources": True,
        })
        self.output_dirs.append(scen.output_dir)
        return SMAC4AC(scen, tae_runner=target, rng=1).solver
Example #10
    def test_abort_on_initial_design(self, patch):
        def target(x):
            return 5

        # should raise an error if abort_on_first_run_crash is True
        patch.side_effect = FirstRunCrashedException()
        scen = Scenario({
            "cs": test_helpers.get_branin_config_space(),
            "run_obj": "quality",
            "output_dir": "data-test_smbo-abort",
            "abort_on_first_run_crash": True,
            "deterministic": False,
            "limit_resources": True,
        })
        self.output_dirs.append(scen.output_dir)
        smbo = SMAC4AC(scen, tae_runner=target, rng=1).solver
        with self.assertRaisesRegex(FirstRunCrashedException, "in _mock_call"):
            smbo.run()

        # should not raise an error if abort_on_first_run_crash is False
        patch.side_effect = FirstRunCrashedException()
        scen = Scenario({
            "cs": test_helpers.get_branin_config_space(),
            "run_obj": "quality",
            "output_dir": "data-test_smbo-abort",
            "abort_on_first_run_crash": False,
            "wallclock-limit": 1,
            "deterministic": False,
            "limit_resources": True,
        })
        self.output_dirs.append(scen.output_dir)
        smbo = SMAC4AC(scen, tae_runner=target, rng=1).solver

        try:
            smbo.start()
            smbo.run()
        except FirstRunCrashedException:
            self.fail("Raises FirstRunCrashedException unexpectedly!")
Example #11
    def test_eips(self):
        scenario = Scenario({'cs': test_helpers.get_branin_config_space(),
                             'run_obj': 'quality',
                             'deterministic': True,
                             'output_dir': ''})
        types = get_types(scenario.cs, None)
        umrfwi = UncorrelatedMultiObjectiveRandomForestWithInstances(
            ['cost', 'runtime'], types)
        eips = EIPS(umrfwi)
        rh2EPM = RunHistory2EPM4EIPS(scenario, 2)
        taf = ExecuteTAFunc(test_function)
        smbo = SMBO(scenario, model=umrfwi, acquisition_function=eips,
                    runhistory2epm=rh2EPM, tae_runner=taf,
                    random_configuration_chooser=ChooserNoCoolDown(2.0))
        smbo.run(5)
        print(smbo.incumbent)
Example #12
    def test_get_next_by_local_search(
            self,
            _get_initial_points_patch,
            patch
    ):
        # Without known incumbent
        class SideEffect(object):

            def __call__(self, *args, **kwargs):
                rval = []
                for i in range(len(args[0])):
                    rval.append((i, ConfigurationMock(i)))
                return rval

        patch.side_effect = SideEffect()
        cs = test_helpers.get_branin_config_space()
        rand_confs = cs.sample_configuration(size=9)
        _get_initial_points_patch.return_value = rand_confs
        acq_func = EI(None)

        ls = LocalSearch(acq_func, cs)

        # To have some data in a mock runhistory
        runhistory = unittest.mock.Mock()
        runhistory.data = [None] * 1000

        rval = ls._maximize(runhistory, None, 9)
        self.assertEqual(len(rval), 9)
        self.assertEqual(patch.call_count, 1)
        for i in range(9):
            self.assertIsInstance(rval[i][1], ConfigurationMock)
            self.assertEqual(rval[i][1].value, 8 - i)
            self.assertEqual(rval[i][0], 8 - i)
            self.assertEqual(rval[i][1].origin, 'Local Search')

        # Check that the known 'incumbent' is transparently passed through
        patch.side_effect = SideEffect()
        _get_initial_points_patch.return_value = ['Incumbent'] + rand_confs
        rval = ls._maximize(runhistory, None, 10)
        self.assertEqual(len(rval), 10)
        self.assertEqual(patch.call_count, 2)
        # Only the first local search in each iteration starts from the
        # incumbent
        self.assertEqual(patch.call_args_list[1][0][0][0], 'Incumbent')
        for i in range(10):
            self.assertEqual(rval[i][1].origin, 'Local Search')
Example #13
    def test_get_next_by_local_search(self, _get_initial_points_patch, patch):
        # Without known incumbent
        class SideEffect(object):
            def __init__(self):
                self.call_number = 0

            def __call__(self, *args, **kwargs):
                rval = 9 - self.call_number
                self.call_number += 1
                return (rval, ConfigurationMock(rval))

        patch.side_effect = SideEffect()
        cs = test_helpers.get_branin_config_space()
        rand_confs = cs.sample_configuration(size=9)
        _get_initial_points_patch.return_value = rand_confs
        acq_func = EI(None)

        ls = LocalSearch(acq_func, cs)

        # To have some data in a mock runhistory
        runhistory = unittest.mock.Mock()
        runhistory.data = [None] * 1000

        rval = ls._maximize(runhistory, None, 9)
        self.assertEqual(len(rval), 9)
        self.assertEqual(patch.call_count, 9)
        for i in range(9):
            self.assertIsInstance(rval[i][1], ConfigurationMock)
            self.assertEqual(rval[i][1].value, 9 - i)
            self.assertEqual(rval[i][0], 9 - i)
            self.assertEqual(rval[i][1].origin, 'Local Search')

        # With known incumbent
        patch.side_effect = SideEffect()
        _get_initial_points_patch.return_value = ['Incumbent'] + rand_confs
        rval = ls._maximize(runhistory, None, 10)
        self.assertEqual(len(rval), 10)
        self.assertEqual(patch.call_count, 19)
        # Only the first local search in each iteration starts from the
        # incumbent
        self.assertEqual(patch.call_args_list[9][0][0], 'Incumbent')
        for i in range(10):
            self.assertEqual(rval[i][1].origin, 'Local Search')
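
The call_args_list indexing used here and in the intensification test above relies on each recorded call behaving like an (args, kwargs) tuple: index [0] is the positional-argument tuple and index [1] the keyword dict. A minimal illustration:

    from unittest import mock

    m = mock.Mock()
    m(10, flag=True)
    m(20)

    # Each entry in call_args_list is a call object that indexes like a tuple:
    # [0] -> positional arguments, [1] -> keyword arguments.
    assert m.call_args_list[0][0] == (10,)
    assert m.call_args_list[0][1] == {'flag': True}
    assert m.call_args_list[1][0][0] == 20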
Example #14
    def test_stop_smbo(self, patch):
        def target(x):
            return 5

        # returning StatusType.STOP from the target algorithm should stop SMBO
        patch.return_value = StatusType.STOP, 0.5, 0.5, {}
        scen = Scenario({
            'cs': test_helpers.get_branin_config_space(),
            'run_obj': 'quality',
            'output_dir': 'data-test_smbo-abort',
            'abort_on_first_run_crash': True
        })
        self.output_dirs.append(scen.output_dir)
        smbo = SMAC4AC(scen, tae_runner=target, rng=1)
        self.assertFalse(smbo.solver._stop)
        smbo.optimize()
        self.assertEqual(len(smbo.runhistory.data), 1)
        self.assertEqual(
            list(smbo.runhistory.data.values())[0].status, StatusType.RUNNING)
        self.assertTrue(smbo.solver._stop)
Example #15
    def test_stop_smbo(self, patch):
        def target(x):
            return 5

        # returning StatusType.STOP from the target algorithm should stop SMBO
        patch.return_value = StatusType.STOP, 0.5, 0.5, {}
        scen = Scenario({
            "cs": test_helpers.get_branin_config_space(),
            "run_obj": "quality",
            "output_dir": "data-test_smbo-abort",
            "abort_on_first_run_crash": True,
            "deterministic": False,
            "limit_resources": True,
        })
        self.output_dirs.append(scen.output_dir)
        smbo = SMAC4AC(scen, tae_runner=target, rng=1)
        self.assertFalse(smbo.solver._stop)
        smbo.optimize()
        self.assertEqual(len(smbo.runhistory.data), 1)
        # After an optimization, we expect no running instances.
        self.assertEqual(
            list(smbo.runhistory.data.values())[0].status, StatusType.STOP)
        self.assertTrue(smbo.solver._stop)
Example #16
    def setUp(self):
        self.scenario = Scenario({'cs': test_helpers.get_branin_config_space(),
                                  'run_obj': 'quality',
                                  'output_dir': ''})
Example #17
    def test_load(self):
        configuration_space = test_helpers.get_branin_config_space()

        other_runhistory = '{"data": [[[2, "branini", 1], [1, 1, 1, null]], ' \
                           '[[1, "branin", 1], [1, 1, 1, null]], ' \
                           '[[3, "branin-hoo", 1], [1, 1, 1, null]], ' \
                           '[[2, null, 1], [1, 1, 1, null]], ' \
                           '[[1, "branini", 1], [1, 1, 1, null]], ' \
                           '[[4, null, 1], [1, 1, 1, null]]], ' \
                           '"configs": {' \
                           '"4": {"x": -2.2060968293349363, "y": 5.183410905645716}, ' \
                           '"3": {"x": -2.7986616377433045, "y": 1.385078921531967}, ' \
                           '"1": {"x": 1.2553300705386103, "y": 10.804867401632372}, ' \
                           '"2": {"x": -4.998284377739827, "y": 4.534988589477597}}}'

        other_runhistory_filename = os.path.join(self.tmp_dir,
                                                 '.runhistory_20.json')
        with open(other_runhistory_filename, 'w') as fh:
            fh.write(other_runhistory)

        # load from an empty runhistory
        runhistory = RunHistory()
        runhistory.load_json(other_runhistory_filename, configuration_space)
        self.assertEqual(sorted(list(runhistory.ids_config.keys())),
                         [1, 2, 3, 4])
        self.assertEqual(len(runhistory.data), 6)

        # load from non-empty runhistory, but the existing run will be
        # overridden because it already existed
        runhistory = RunHistory()
        configuration_space.seed(1)
        config = configuration_space.sample_configuration()
        runhistory.add(config, 1, 1, StatusType.SUCCESS, seed=1,
                       instance_id='branin')
        id_before = id(runhistory.data[runhistory.RunKey(1, 'branin', 1)])
        runhistory.update_from_json(other_runhistory_filename,
                                    configuration_space)
        id_after = id(runhistory.data[runhistory.RunKey(1, 'branin', 1)])
        self.assertEqual(len(runhistory.data), 6)
        self.assertNotEqual(id_before, id_after)

        # load from non-empty runhistory, but existing run will not be
        # overridden, but config_id will be re-used
        runhistory = RunHistory()
        configuration_space.seed(1)
        config = configuration_space.sample_configuration()
        config = configuration_space.sample_configuration()
        # This is the former config_3
        config = configuration_space.sample_configuration()
        runhistory.add(config, 1, 1, StatusType.SUCCESS, seed=1,
                       instance_id='branin')
        id_before = id(runhistory.data[runhistory.RunKey(1, 'branin', 1)])
        runhistory.update_from_json(other_runhistory_filename,
                                    configuration_space)
        id_after = id(runhistory.data[runhistory.RunKey(1, 'branin', 1)])
        self.assertEqual(len(runhistory.data), 7)
        self.assertEqual(id_before, id_after)
        print(runhistory.config_ids)
        self.assertEqual(sorted(list(runhistory.ids_config.keys())),
                         [1, 2, 3, 4])
        print(list(runhistory.data.keys()))
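
The id() comparison is how the test distinguishes "entry replaced by the loaded data" from "existing entry kept": two objects can compare equal yet be distinct. A tiny illustration:

    data = {'key': [1]}
    before = data['key']
    data['key'] = [1]                        # equal value, brand-new object
    assert data['key'] == before             # values compare equal
    assert id(data['key']) != id(before)     # identities differ -> replaced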
Example #18
    def test_write(self):
        # The nulls make sure that we correctly emit the Python None value
        fixture = '{"data": [[[1, "branin", 1], [1, 1, {"__enum__": ' \
                  '"StatusType.SUCCESS"}, null]], ' \
                  '[[1, "branini", 1], [1, 1, {"__enum__": ' \
                  '"StatusType.SUCCESS"}, null]], ' \
                  '[[2, "branini", 1], [1, 1, {"__enum__": ' \
                  '"StatusType.SUCCESS"}, null]], ' \
                  '[[2, null, 1], [1, 1, {"__enum__": ' \
                  '"StatusType.SUCCESS"}, null]], ' \
                  '[[3, "branin-hoo", 1], [1, 1, {"__enum__": ' \
                  '"StatusType.SUCCESS"}, null]], ' \
                  '[[4, null, 1], [1, 1, {"__enum__": ' \
                  '"StatusType.SUCCESS"}, null]]],' \
                  '"config_origins": {},' \
                  '"configs": {' \
                  '"4": {"x": -2.2060968293349363, "y": 5.183410905645716}, ' \
                  '"3": {"x": -2.7986616377433045, "y": 1.385078921531967}, ' \
                  '"1": {"x": 1.2553300705386103, "y": 10.804867401632372}, ' \
                  '"2": {"x": -4.998284377739827, "y": 4.534988589477597}}}'

        run_history = RunHistory(aggregate_func=average_cost)
        configuration_space = test_helpers.get_branin_config_space()
        configuration_space.seed(1)

        config = configuration_space.sample_configuration()
        # Config on two instances
        run_history.add(config,
                        1,
                        1,
                        StatusType.SUCCESS,
                        seed=1,
                        instance_id='branin')
        run_history.add(config,
                        1,
                        1,
                        StatusType.SUCCESS,
                        seed=1,
                        instance_id='branini')
        config_2 = configuration_space.sample_configuration()
        # Another config on a known instance
        run_history.add(config_2,
                        1,
                        1,
                        StatusType.SUCCESS,
                        seed=1,
                        instance_id='branini')
        # Known Config on no instance
        run_history.add(config_2, 1, 1, StatusType.SUCCESS, seed=1)
        # New config on new instance
        config_3 = configuration_space.sample_configuration()
        run_history.add(config_3,
                        1,
                        1,
                        StatusType.SUCCESS,
                        seed=1,
                        instance_id='branin-hoo')
        # New config on no instance
        config_4 = configuration_space.sample_configuration()
        run_history.add(config_4, 1, 1, StatusType.SUCCESS, seed=1)

        # External configuration which will not be written to the json file!
        config_5 = configuration_space.sample_configuration()
        run_history.add(config_5,
                        1,
                        1,
                        StatusType.SUCCESS,
                        seed=1,
                        origin=DataOrigin.EXTERNAL_SAME_INSTANCES)

        logger = logging.getLogger("Test")
        pSMAC.write(run_history, self.tmp_dir, logger=logger)
        r_size = len(run_history.data)
        pSMAC.read(run_history=run_history,
                   output_dirs=[self.tmp_dir],
                   configuration_space=configuration_space,
                   logger=logger)
        self.assertEqual(
            r_size, len(run_history.data),
            "Runhistory should be the same and not changed after reading")

        output_filename = os.path.join(self.tmp_dir, 'runhistory.json')
        self.assertTrue(os.path.exists(output_filename))

        fixture = json.loads(fixture, object_hook=StatusType.enum_hook)
        with open(output_filename) as fh:
            output = json.load(fh, object_hook=StatusType.enum_hook)
        self.assertEqual(output, fixture)
Example #19
    def test_load(self):
        configuration_space = test_helpers.get_branin_config_space()

        other_runhistory = '{"data": [[[2, "branini", 1], [1, 1,' \
                  '{"__enum__": "StatusType.SUCCESS"}, null]], ' \
                  '[[1, "branin", 1], [1, 1,' \
                  '{"__enum__": "StatusType.SUCCESS"}, null]], ' \
                  '[[3, "branin-hoo", 1], [1, 1,' \
                  '{"__enum__": "StatusType.SUCCESS"}, null]], ' \
                  '[[2, null, 1], [1, 1,' \
                  '{"__enum__": "StatusType.SUCCESS"}, null]], ' \
                  '[[1, "branini", 1], [1, 1,' \
                  '{"__enum__": "StatusType.SUCCESS"}, null]], ' \
                  '[[4, null, 1], [1, 1,' \
                  '{"__enum__": "StatusType.SUCCESS"}, null]]], ' \
                  '"configs": {' \
                  '"4": {"x": -2.2060968293349363, "y": 5.183410905645716}, ' \
                  '"3": {"x": -2.7986616377433045, "y": 1.385078921531967}, ' \
                  '"1": {"x": 1.2553300705386103, "y": 10.804867401632372}, ' \
                  '"2": {"x": -4.998284377739827, "y": 4.534988589477597}}}'

        other_runhistory_filename = os.path.join(self.tmp_dir,
                                                 'runhistory.json')
        with open(other_runhistory_filename, 'w') as fh:
            fh.write(other_runhistory)

        # load from an empty runhistory
        runhistory = RunHistory(aggregate_func=average_cost)
        runhistory.load_json(other_runhistory_filename, configuration_space)
        self.assertEqual(sorted(list(runhistory.ids_config.keys())),
                         [1, 2, 3, 4])
        self.assertEqual(len(runhistory.data), 6)

        # load from non-empty runhistory, in case of a duplicate the existing
        # result will be kept and the new one silently discarded
        runhistory = RunHistory(aggregate_func=average_cost)
        configuration_space.seed(1)
        config = configuration_space.sample_configuration()
        runhistory.add(config,
                       1,
                       1,
                       StatusType.SUCCESS,
                       seed=1,
                       instance_id='branin')
        id_before = id(runhistory.data[RunKey(1, 'branin', 1)])
        runhistory.update_from_json(other_runhistory_filename,
                                    configuration_space)
        id_after = id(runhistory.data[RunKey(1, 'branin', 1)])
        self.assertEqual(len(runhistory.data), 6)
        self.assertEqual(id_before, id_after)

        # load from non-empty runhistory, in case of a duplicate the existing
        # result will be kept and the new one silently discarded
        runhistory = RunHistory(aggregate_func=average_cost)
        configuration_space.seed(1)
        config = configuration_space.sample_configuration()
        config = configuration_space.sample_configuration()
        # This is the former config_3
        config = configuration_space.sample_configuration()
        runhistory.add(config,
                       1,
                       1,
                       StatusType.SUCCESS,
                       seed=1,
                       instance_id='branin')
        id_before = id(runhistory.data[RunKey(1, 'branin', 1)])
        runhistory.update_from_json(other_runhistory_filename,
                                    configuration_space)
        id_after = id(runhistory.data[RunKey(1, 'branin', 1)])
        self.assertEqual(len(runhistory.data), 7)
        self.assertEqual(id_before, id_after)
        self.assertEqual(sorted(list(runhistory.ids_config.keys())),
                         [1, 2, 3, 4])
        self.assertEqual(
            [runhistory.external[run_key] for run_key in runhistory.data],
            [DataOrigin.INTERNAL] + [DataOrigin.EXTERNAL_SAME_INSTANCES] * 6)