def test_read_instances(self):
    """The default Luby instance set contains exactly one [0, 0] instance.

    Checks both access paths: reading the instance set explicitly via
    ``read_instance_set()`` and implicitly via ``get_environment()``.
    """
    # Explicit path: read_instance_set() populates config.instance_set.
    bench = LubyBenchmark()
    bench.read_instance_set()
    # assertEqual (not assertTrue(x == y)) so failures report both values.
    self.assertEqual(len(bench.config.instance_set), 1)
    self.assertEqual(len(bench.config.instance_set[0]), 2)
    self.assertEqual(bench.config.instance_set[0], [0, 0])

    # Implicit path: get_environment() should load the same instance set.
    bench2 = LubyBenchmark()
    env = bench2.get_environment()
    self.assertEqual(len(env.instance_set), 1)
    self.assertEqual(len(env.instance_set[0]), 2)
    self.assertEqual(env.instance_set[0], [0, 0])
def test_fit(self):
    """InstanceSamplingWrapper samples should mimic the training distribution.

    Draws 100 samples from the wrapper's sampling function and checks that
    the mutual information between the training instances and the samples
    is high (> 0.99) but not perfect (!= 1) in both instance dimensions.
    """
    bench = LubyBenchmark()
    bench.config.instance_set_path = "../instance_sets/luby/luby_train.csv"
    bench.read_instance_set()
    instances = bench.config.instance_set
    env = bench.get_environment()
    wrapped = InstanceSamplingWrapper(env, instances=instances)

    # 100 independent draws from the fitted sampling function.
    samples = [wrapped.sampling_function() for _ in range(100)]

    # Convert once instead of rebuilding the arrays per metric call.
    instance_arr = np.array(instances)
    sample_arr = np.array(samples)
    mi1 = mutual_info_score(instance_arr[:, 0], sample_arr[:, 0])
    mi2 = mutual_info_score(instance_arr[:, 1], sample_arr[:, 1])

    # assertGreater/assertNotEqual give informative failure messages.
    self.assertGreater(mi1, 0.99)
    self.assertNotEqual(mi1, 1)
    self.assertGreater(mi2, 0.99)
    self.assertNotEqual(mi2, 1)