Example #1
    def test_refund(self):
        data = np.array([0, 2, 0])
        correct_class = _ground_truth(data)

        orig_address = "Orig"
        bal = 1E5
        self.balances.initialize(orig_address, bal)
        msg = Msg(orig_address, 1E3)
        self.time_method.set_time(self.time_method() + 1)
        added_time = self.time_method()
        self.decai.add_data(msg, data, correct_class)
        self.assertLess(self.balances[orig_address], bal)

        # Add same data from another address.
        msg = Msg(self.good_address, 1E3)
        self.time_method.set_time(self.time_method() + 1)
        bal = self.balances[self.good_address]
        self.decai.add_data(msg, data, correct_class)
        self.assertLess(self.balances[self.good_address], bal)

        # Original address refunds.
        msg = Msg(orig_address, 1E3)
        bal = self.balances[orig_address]
        self.time_method.set_time(self.time_method() +
                                  self.decai.im.refund_time_s + 1)
        self.decai.refund(msg, data, correct_class, added_time)
        self.assertGreater(self.balances[orig_address], bal)
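
Example #1 exercises the refund path: a contributor stakes a deposit via add_data and reclaims it with refund once the refund window has passed. The listing never shows the helpers it relies on; below is a minimal sketch of compatible stand-ins (both are hypothetical reconstructions for illustration, not the project's actual definitions):

from collections import namedtuple

# Hypothetical: a message bundles the sender's address with the value staked on the call.
Msg = namedtuple('Msg', ['sender', 'value'])

def _ground_truth(data):
    # Placeholder labeling rule (assumption): label 1 iff most features are non-zero.
    # Any deterministic, linearly separable rule would let these tests run.
    return 1 if sum(1 for v in data if v != 0) > len(data) / 2 else 0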
Example #2
    def test_report_take_all(self):
        data = np.array([0, 0, 0])
        correct_class = _ground_truth(data)
        submitted_classification = 1 - correct_class
        # Add bad data.
        malicious_address = 'malicious_take_backer'
        self.balances.initialize(malicious_address, 1E6)
        bal = self.balances[malicious_address]
        msg = Msg(malicious_address, bal)
        self.time_method.set_time(self.time_method() + 1)
        added_time = self.time_method()
        self.decai.add_data(msg, data, submitted_classification)
        self.assertLess(self.balances[malicious_address], bal,
                        "Adding data should have a cost.")

        self.time_method.set_time(self.time_method() +
                                  self.decai.im.any_address_claim_wait_time_s +
                                  1)

        # Can't refund.
        msg = Msg(malicious_address, self.balances[malicious_address])
        with self.assertRaises(RejectException) as cm:
            self.decai.refund(msg, data, submitted_classification, added_time)
        self.assertEqual("The model doesn't agree with your contribution.",
                         cm.exception.args[0])

        bal = self.balances[malicious_address]
        msg = Msg(malicious_address, bal)
        self.decai.report(msg, data, submitted_classification, added_time,
                          malicious_address)
        self.assertGreater(self.balances[malicious_address], bal)
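
The failed refund and the successful report above hinge on two timing windows plus model agreement. A condensed sketch of the rules the assertions exercise (hypothetical function names; the attributes refund_time_s and any_address_claim_wait_time_s come from the tests themselves):

def can_refund(now, added_time, refund_time_s, model_agrees):
    # A contributor may reclaim their own deposit only after the refund window
    # has elapsed AND the current model still agrees with the submitted label.
    return now - added_time > refund_time_s and model_agrees

def can_take_all(now, added_time, any_address_claim_wait_time_s):
    # After the longer wait, any address may claim the remaining deposit via report(),
    # which is why the malicious contributor above can "take back" its own stake.
    return now - added_time > any_address_claim_wait_time_s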
Example #3
    def test_report(self):
        inj = Injector([
            SimpleDataModule,
            LoggingModule,
            PerceptronModule,
            PredictionMarketImModule(
                allow_greater_deposit=True,
                group_contributions=True,
                reset_model_during_reward_phase=True,
            ),
        ])
        balances = inj.get(Balances)
        data = inj.get(DataLoader)
        im = cast(PredictionMarket, inj.get(IncentiveMechanism))
        im.owner = 'owner'
        time_method = inj.get(TimeMock)

        assert isinstance(im, PredictionMarket)

        init_train_data_portion = 0.2

        initializer_address = 'initializer'
        total_bounty = 100_000
        balances.initialize(initializer_address, total_bounty)

        good_contributor_address = 'good_contributor'
        initial_good_balance = 10_000
        balances.initialize(good_contributor_address, initial_good_balance)

        bad_contributor_address = 'bad_contributor'
        initial_bad_balance = 10_000
        balances.initialize(bad_contributor_address, initial_bad_balance)

        (x_train, y_train), (x_test, y_test) = data.load_data()

        init_idx = int(len(x_train) * init_train_data_portion)
        assert init_idx > 0

        x_init_data, y_init_data = x_train[:init_idx], y_train[:init_idx]
        x_remaining, y_remaining = x_train[init_idx:], y_train[init_idx:]

        # Split test set into pieces.
        num_pieces = 10
        test_dataset_hashes, test_sets = im.get_test_set_hashes(num_pieces, x_test, y_test)

        # Ending criteria:
        min_length_s = 100
        min_num_contributions = min(len(x_remaining), 100)

        # Commitment Phase
        self.assertIsNone(im.state)
        im.model.init_model(x_init_data, y_init_data)
        test_reveal_index = im.initialize_market(Msg(initializer_address, total_bounty),
                                                 test_dataset_hashes,
                                                 min_length_s, min_num_contributions)
        self.assertEqual(MarketPhase.INITIALIZATION, im.state)
        assert 0 <= test_reveal_index < len(test_dataset_hashes)
        im.reveal_init_test_set(test_sets[test_reveal_index])

        self.assertEqual(MarketPhase.PARTICIPATION, im.state)
        # Participation Phase
        value = 100
        total_deposits = defaultdict(float)
        stored_data = None
        for i in range(min_num_contributions):
            time_method.add_time(60)
            data = x_remaining[i]
            classification = y_remaining[i]
            if i % 2 == 0:
                contributor = good_contributor_address
            else:
                contributor = bad_contributor_address
                classification = 1 - classification
            cost, _ = im.handle_add_data(contributor, value, data, classification)
            if stored_data is None:
                stored_data = StoredData(classification, time_method(), contributor, cost, cost)
            balances.send(contributor, im.owner, cost)
            total_deposits[contributor] += cost

        # Reward Phase
        self.assertEqual(MarketPhase.PARTICIPATION, im.state)
        im.end_market()
        time_method.add_time(60)
        self.assertEqual(MarketPhase.REVEAL_TEST_SET, im.state)
        for i, test_set_portion in enumerate(test_sets):
            if i != test_reveal_index:
                im.verify_next_test_set(test_set_portion)
        self.assertEqual(MarketPhase.REWARD_RESTART, im.state)
        while im.remaining_bounty_rounds > 0:
            time_method.add_time(60)
            im.process_contribution()

        # Collect rewards.
        self.assertEqual(MarketPhase.REWARD_COLLECT, im.state)

        # Make sure reporting doesn't work yet: the claim wait time hasn't passed.
        reward = im.handle_report(bad_contributor_address, stored_data, False, None)
        self.assertEqual(0, reward, "There should be no reward yet.")

        time_method.add_time(im.any_address_claim_wait_time_s)
        reward = im.handle_report(bad_contributor_address, stored_data, False, None)
        balances.send(im.owner, bad_contributor_address, reward)

        # Don't need to pass the right StoredData.
        # noinspection PyTypeChecker
        reward = im.handle_refund(bad_contributor_address, None, 0, False, None)
        balances.send(im.owner, bad_contributor_address, reward)

        # General checks that should be true for a market with a reasonably sensitive model.
        self.assertLess(balances[im.owner], total_bounty,
                        f"Some of the bounty should be distributed.\n"
                        f"Balances: {balances.get_all()}")
        self.assertLess(0, balances[im.owner])

        self.assertGreater(total_deposits[good_contributor_address], 0)
        self.assertGreater(total_deposits[bad_contributor_address], 0)

        # The bad contributor profited because they reported the good contributor.
        self.assertGreater(balances[bad_contributor_address], initial_bad_balance)
        self.assertLess(balances[good_contributor_address], initial_good_balance)

        self.assertLess(balances[good_contributor_address], balances[bad_contributor_address])
        self.assertLessEqual(balances[bad_contributor_address] - balances[good_contributor_address],
                             total_bounty)
        self.assertEqual(initial_good_balance + initial_bad_balance + total_bounty,
                         balances[good_contributor_address] + balances[bad_contributor_address] +
                         balances[im.owner],
                         "Should be a zero-sum.")

        self.assertEqual(initial_good_balance - total_deposits[good_contributor_address],
                         balances[good_contributor_address],
                         "The good contributor should lose all of their deposits.")
Example #4
    def test_market_like_original_paper(self):
        inj = Injector([
            SimpleDataModule,
            LoggingModule,
            PerceptronModule,
            PredictionMarketImModule(
                allow_greater_deposit=False,
                group_contributions=False,
                reset_model_during_reward_phase=False,
            ),
        ])

        balances = inj.get(Balances)
        data = inj.get(DataLoader)
        im = cast(PredictionMarket, inj.get(IncentiveMechanism))
        im.owner = 'owner'
        assert isinstance(im, PredictionMarket)

        init_train_data_portion = 0.2

        initializer_address = 'initializer'
        total_bounty = 100_000
        balances.initialize(initializer_address, total_bounty)

        good_contributor_address = 'good_contributor'
        initial_good_balance = 10_000
        balances.initialize(good_contributor_address, initial_good_balance)

        bad_contributor_address = 'bad_contributor'
        initial_bad_balance = 10_000
        balances.initialize(bad_contributor_address, initial_bad_balance)

        (x_train, y_train), (x_test, y_test) = data.load_data()

        init_idx = int(len(x_train) * init_train_data_portion)
        assert init_idx > 0

        x_init_data, y_init_data = x_train[:init_idx], y_train[:init_idx]
        x_remaining, y_remaining = x_train[init_idx:], y_train[init_idx:]

        # Split test set into pieces.
        num_pieces = 10
        test_dataset_hashes, test_sets = im.get_test_set_hashes(num_pieces, x_test, y_test)

        # Ending criteria:
        min_length_s = 100
        min_num_contributions = min(len(x_remaining), 100)

        # Commitment Phase
        self.assertIsNone(im.state)

        im.model.init_model(x_init_data, y_init_data)

        hashes_split = 3
        test_reveal_index = im.initialize_market(Msg(initializer_address, total_bounty),
                                                 test_dataset_hashes[:hashes_split],
                                                 min_length_s, min_num_contributions)
        assert 0 <= test_reveal_index < len(test_dataset_hashes)
        self.assertEqual(MarketPhase.INITIALIZATION, im.state)

        test_reveal_index = im.add_test_set_hashes(Msg(initializer_address, 0), test_dataset_hashes[hashes_split:])
        assert 0 <= test_reveal_index < len(test_dataset_hashes)
        self.assertEqual(MarketPhase.INITIALIZATION, im.state)

        im.reveal_init_test_set(test_sets[test_reveal_index])

        self.assertEqual(MarketPhase.PARTICIPATION, im.state)
        # Participation Phase
        value = 100
        total_deposits = defaultdict(float)
        for i in range(min_num_contributions):
            data = x_remaining[i]
            classification = y_remaining[i]
            if i % 2 == 0:
                contributor = good_contributor_address
            else:
                contributor = bad_contributor_address
                classification = 1 - classification
            cost, _ = im.handle_add_data(contributor, value, data, classification)
            self.assertEqual(im.min_stake, cost, "Cost should be the minimum stake because of the options passed in.")
            balances.send(contributor, im.owner, cost)
            total_deposits[contributor] += cost

        # Reward Phase
        self.assertEqual(MarketPhase.PARTICIPATION, im.state)
        im.end_market()
        self.assertEqual(MarketPhase.REVEAL_TEST_SET, im.state)
        for i, test_set_portion in enumerate(test_sets):
            if i != test_reveal_index:
                im.verify_next_test_set(test_set_portion)
        self.assertEqual(MarketPhase.REWARD_RESTART, im.state)
        while im.remaining_bounty_rounds > 0:
            im.process_contribution()

        # Collect rewards.
        self.assertEqual(MarketPhase.REWARD_COLLECT, im.state)
        for contributor in [good_contributor_address, bad_contributor_address]:
            # Don't need to pass the right StoredData.
            # noinspection PyTypeChecker
            reward = im.handle_refund(contributor, None, 0, False, None)
            balances.send(im.owner, contributor, reward)

        self.assertGreater(total_deposits[good_contributor_address], 0)
        self.assertGreater(total_deposits[bad_contributor_address], 0)

        # General checks that should be true for a market with a reasonably sensitive model.
        self.assertLess(balances[im.owner], total_bounty,
                        f"Some of the bounty should be distributed.\n"
                        f"Balances: {balances.get_all()}")
        self.assertLess(0, balances[im.owner])

        # Sometimes the bad contributor happens to get some value but not much.
        self.assertAlmostEqual(balances[bad_contributor_address], initial_bad_balance, delta=2,
                               msg=f"The bad contributor should not profit.\n"
                                   f"Balances: {balances.get_all()}")
        self.assertGreater(balances[good_contributor_address], initial_good_balance)
        self.assertLess(balances[bad_contributor_address], balances[good_contributor_address])
        self.assertLessEqual(balances[good_contributor_address] - balances[bad_contributor_address],
                             total_bounty)
        self.assertEqual(initial_good_balance + initial_bad_balance + total_bounty,
                         balances[good_contributor_address] + balances[bad_contributor_address] +
                         balances[im.owner],
                         "Should be a zero-sum.")
Example #5
    @classmethod
    def setUpClass(cls):
        inj = Injector([
            DefaultCollaborativeTrainerModule,
            LoggingModule,
            PerceptronModule,
            StakeableImModule,
        ])
        cls.balances = inj.get(Balances)
        cls.decai = inj.get(CollaborativeTrainer)
        cls.time_method = inj.get(TimeMock)

        cls.good_address = 'sender'
        initial_balance = 1E6
        cls.balances.initialize(cls.good_address, initial_balance)
        msg = Msg(cls.good_address, cls.balances[cls.good_address])

        X = np.array([
            # Initialization Data
            [0, 0, 0],
            [1, 1, 1],

            # Data to Add
            [0, 0, 1],
            [0, 1, 0],
            [0, 1, 1],
            [1, 0, 0],
            [1, 0, 1],
            [1, 1, 0],
        ])
        y = np.array([_ground_truth(x) for x in X])
        cls.decai.model.init_model(np.array([X[0, :], X[1, :]]),
                                   np.array([y[0], y[1]]))
        score = cls.decai.model.evaluate(X, y)
        assert score != 1, "Model shouldn't fit the data yet."

        # Add all data.
        first_added_time = None
        for i in range(X.shape[0]):
            x = X[i]
            cls.time_method.set_time(cls.time_method() + 1)
            if first_added_time is None:
                first_added_time = cls.time_method()
            cls.decai.add_data(msg, x, y[i])

        for _ in range(1000):
            score = cls.decai.model.evaluate(X, y)
            if score >= 1:
                break
            i = random.randint(0, X.shape[0] - 1)
            x = X[i]
            cls.time_method.set_time(cls.time_method() + 1)
            cls.decai.add_data(msg, x, y[i])
        assert score == 1, "Model didn't fit the data."

        bal = cls.balances[msg.sender]
        assert bal < initial_balance, "Adding data should have a cost."

        # Make sure sender has some good data refunded so that they can report data later.
        cls.time_method.set_time(cls.time_method() +
                                 cls.decai.im.refund_time_s + 1)
        cls.decai.refund(msg, X[0], y[0], first_added_time)
        assert cls.balances[msg.sender] > bal, "Refunding should return value."
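
setUpClass drives all timing through TimeMock rather than the wall clock. A minimal sketch of a compatible mock, assuming only the interface the tests above use (calling it returns the current time; set_time and add_time move it forward):

class TimeMock:
    # Deterministic clock: tests advance time explicitly instead of sleeping,
    # so timing-window logic (refund_time_s, claim waits) is reproducible.
    def __init__(self, start_time_s=0):
        self._time_s = start_time_s

    def __call__(self):
        return self._time_s

    def set_time(self, time_s):
        self._time_s = time_s

    def add_time(self, seconds):
        self._time_s += seconds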
Example #6
        def task():
            (x_train, y_train), (x_test, y_test) = \
                self._data_loader.load_data(train_size=train_size, test_size=test_size)
            init_idx = int(len(x_train) * init_train_data_portion)
            self._logger.info("Initializing model with %d out of %d samples.",
                              init_idx, len(x_train))
            x_init_data, y_init_data = x_train[:init_idx], y_train[:init_idx]
            x_remaining, y_remaining = x_train[init_idx:], y_train[init_idx:]

            self._decai.model.init_model(x_init_data, y_init_data)

            if self._logger.isEnabledFor(logging.DEBUG):
                s = self._decai.model.evaluate(x_init_data, y_init_data)
                self._logger.debug("Initial training data evaluation: %s", s)
                s = self._decai.model.evaluate(x_remaining, y_remaining)
                self._logger.debug("Remaining training data evaluation: %s", s)

            self._logger.info("Evaluating initial model.")
            accuracy = self._decai.model.evaluate(x_test, y_test)
            self._logger.info("Initial test set accuracy: %0.2f%%", accuracy * 100)
            t = self._time()
            doc.add_next_tick_callback(
                partial(plot_accuracy_cb, t=t, a=accuracy))

            q = PriorityQueue()
            random.shuffle(agents)
            for agent in agents:
                self._balances.initialize(agent.address, agent.start_balance)
                q.put((self._time() + agent.get_next_wait_s(), agent))
                doc.add_next_tick_callback(
                    partial(plot_cb, agent=agent, t=t, b=agent.start_balance))

            unclaimed_data = []
            next_data_index = 0
            next_accuracy_plot_time = 1E4
            desc = "Processing agent requests"
            with tqdm(desc=desc,
                      unit_scale=True, mininterval=2, unit=" requests",
                      total=len(x_remaining),
                      ) as pbar:
                while not q.empty():
                    # For now assume sending a transaction (editing) is free (no gas)
                    # since it should be relatively cheaper than the deposit required to add data.
                    # It may not be cheaper than calling `report`.

                    if next_data_index >= len(x_remaining):
                        if not continuous_evaluation or len(unclaimed_data) == 0:
                            break

                    current_time, agent = q.get()
                    update_balance_plot = False
                    if current_time > next_accuracy_plot_time:
                        self._logger.debug("Evaluating.")
                        next_accuracy_plot_time += accuracy_plot_wait_s
                        accuracy = self._decai.model.evaluate(x_test, y_test)
                        doc.add_next_tick_callback(
                            partial(plot_accuracy_cb, t=current_time, a=accuracy))

                        if continuous_evaluation:
                            self._logger.debug("Unclaimed data: %d", len(unclaimed_data))
                            pbar.set_description(f"{desc} ({len(unclaimed_data)} unclaimed)")

                        with open(save_path, 'w') as f:
                            json.dump(save_data, f, separators=(',', ':'))

                        if os.path.exists(plot_save_path):
                            os.remove(plot_save_path)
                        export_png(plot, plot_save_path)

                    self._time.set_time(current_time)

                    balance = self._balances[agent.address]
                    if balance > 0 and next_data_index < len(x_remaining):
                        # Pick data.
                        x, y = x_remaining[next_data_index], y_remaining[next_data_index]

                        if agent.calls_model:
                            # Only call the model if it's good.
                            if random.random() < accuracy:
                                update_balance_plot = True
                                self._decai.predict(Msg(agent.address, agent.pay_to_call), x)
                        else:
                            if not agent.good:
                                y = 1 - y
                            if agent.prob_mistake > 0 and random.random() < agent.prob_mistake:
                                y = 1 - y

                            # Bad agents always contribute.
                            # Good agents only work while the model is doing well.
                            # The +0.15 bonus means an accuracy of 0.85 already guarantees a contribution.
                            if not agent.good or random.random() < accuracy + 0.15:
                                value = agent.get_next_deposit()
                                if value > balance:
                                    value = balance
                                msg = Msg(agent.address, value)
                                try:
                                    self._decai.add_data(msg, x, y)
                                    # Don't need to plot every time. Plot less as we get more data.
                                    update_balance_plot = next_data_index / len(x_remaining) + 0.1 < random.random()
                                    balance = self._balances[agent.address]
                                    if continuous_evaluation:
                                        unclaimed_data.append((current_time, agent, x, y))
                                    next_data_index += 1
                                    pbar.update()
                                except RejectException:
                                    # Likely rejected because the deposit was too small, or because
                                    # not enough time had passed since the same data was last added.
                                    # Both are fine: a real contract would reject these too, since
                                    # the smallest usable unit of time is 1s.
                                    if self._logger.isEnabledFor(logging.DEBUG):
                                        self._logger.exception("Error adding data.")

                    if balance > 0:
                        q.put((current_time + agent.get_next_wait_s(), agent))

                    claimed_indices = []
                    for i in range(len(unclaimed_data)):
                        added_time, adding_agent, x, classification = unclaimed_data[i]
                        if current_time - added_time < self._decai.im.refund_time_s:
                            break
                        if next_data_index >= len(x_remaining) \
                                and current_time - added_time < self._decai.im.any_address_claim_wait_time_s:
                            break
                        balance = self._balances[agent.address]
                        msg = Msg(agent.address, balance)

                        if current_time - added_time > self._decai.im.any_address_claim_wait_time_s:
                            # Attempt to take the entire deposit.
                            try:
                                self._decai.report(msg, x, classification, added_time, adding_agent.address)
                                update_balance_plot = True
                            except RejectException:
                                if self._logger.isEnabledFor(logging.DEBUG):
                                    self._logger.exception("Error taking reward.")
                        elif adding_agent.address == agent.address:
                            try:
                                self._decai.refund(msg, x, classification, added_time)
                                update_balance_plot = True
                            except RejectException:
                                if self._logger.isEnabledFor(logging.DEBUG):
                                    self._logger.exception("Error getting refund.")
                        else:
                            try:
                                self._decai.report(msg, x, classification, added_time, adding_agent.address)
                                update_balance_plot = True
                            except RejectException:
                                if self._logger.isEnabledFor(logging.DEBUG):
                                    self._logger.exception("Error taking reward.")

                        stored_data = self._decai.data_handler.get_data(x, classification,
                                                                        added_time, adding_agent.address)
                        if stored_data.claimable_amount <= 0:
                            claimed_indices.append(i)

                    for i in claimed_indices[::-1]:
                        unclaimed_data.pop(i)

                    if update_balance_plot:
                        balance = self._balances[agent.address]
                        doc.add_next_tick_callback(
                            partial(plot_cb, agent=agent, t=current_time, b=balance))

            self._logger.info("Done going through data.")
            if continuous_evaluation:
                pbar.set_description(f"{desc} ({len(unclaimed_data)} unclaimed)")

            if isinstance(self._decai.im, PredictionMarket):
                self._time.add_time(agents[0].get_next_wait_s())
                self._decai.im.end_market()
                for i, test_set_portion in enumerate(pm_test_sets):
                    if i != self._decai.im.test_reveal_index:
                        self._decai.im.verify_next_test_set(test_set_portion)
                with tqdm(desc="Processing contributions",
                          unit_scale=True, mininterval=2, unit=" contributions",
                          total=self._decai.im.get_num_contributions_in_market(),
                          ) as pbar:
                    finished_first_round_of_rewards = False
                    while self._decai.im.remaining_bounty_rounds > 0:
                        self._time.add_time(agents[0].get_next_wait_s())
                        self._decai.im.process_contribution()
                        pbar.update()

                        if not finished_first_round_of_rewards:
                            accuracy = self._decai.im.prev_acc
                            # If we plot too often then we end up with a blob instead of a line.
                            if random.random() < 0.1:
                                doc.add_next_tick_callback(
                                    partial(plot_accuracy_cb, t=self._time(), a=accuracy))

                        if self._decai.im.state == MarketPhase.REWARD_RESTART:
                            finished_first_round_of_rewards = True
                            if self._decai.im.reset_model_during_reward_phase:
                                # Update the accuracy after resetting all data.
                                accuracy = self._decai.im.prev_acc
                            else:
                                # Use the accuracy after training with all data.
                                pass
                            doc.add_next_tick_callback(
                                partial(plot_accuracy_cb, t=self._time(), a=accuracy))
                            pbar.total += self._decai.im.get_num_contributions_in_market()
                            self._time.add_time(self._time() * 0.001)

                            for agent in agents:
                                balance = self._balances[agent.address]
                                market_bal = self._decai.im._market_balances[agent.address]
                                self._logger.debug("\"%s\" market balance: %0.2f   Balance: %0.2f",
                                                   agent.address, market_bal, balance)
                                doc.add_next_tick_callback(
                                    partial(plot_cb, agent=agent, t=self._time(), b=max(balance + market_bal, 0)))

                self._time.add_time(self._time() * 0.02)
                for agent in agents:
                    msg = Msg(agent.address, 0)
                    # Find data submitted by them.
                    data = None
                    for key, stored_data in self._decai.data_handler:
                        if stored_data.sender == agent.address:
                            data = key[0]
                            break
                    if data is not None:
                        self._decai.refund(msg, np.array(data), stored_data.classification, stored_data.time)
                        balance = self._balances[agent.address]
                        doc.add_next_tick_callback(
                            partial(plot_cb, agent=agent, t=self._time(), b=balance))
                        self._logger.info("Balance for \"%s\": %.2f (%+.2f%%)",
                                          agent.address, balance,
                                          (balance - agent.start_balance) / agent.start_balance * 100)
                    else:
                        self._logger.warning("No data submitted by \"%s\" was found."
                                             "\nWill not update its balance.", agent.address)

                self._logger.info("Done issuing rewards.")

            accuracy = self._decai.model.evaluate(x_test, y_test)
            doc.add_next_tick_callback(
                partial(plot_accuracy_cb, t=current_time, a=accuracy))

            with open(save_path, 'w') as f:
                json.dump(save_data, f, separators=(',', ':'))

            if os.path.exists(plot_save_path):
                os.remove(plot_save_path)
            export_png(plot, plot_save_path)
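
The simulation above interleaves many agents by keeping a PriorityQueue ordered by each agent's next wake-up time. Stripped of the plotting and market logic, the scheduling skeleton looks roughly like this (a sketch; handle_turn and the integer tie-breaker are additions not present in the original loop, which enqueues bare (time, agent) pairs):

from queue import PriorityQueue

def run_agents(agents, time, balances, handle_turn):
    # Entries are (wake_up_time, tie_breaker, agent); the integer tie-breaker
    # keeps the heap from trying to compare agent objects when times collide.
    q = PriorityQueue()
    for i, agent in enumerate(agents):
        q.put((time() + agent.get_next_wait_s(), i, agent))
    while not q.empty():
        current_time, i, agent = q.get()
        time.set_time(current_time)
        handle_turn(agent, current_time)
        if balances[agent.address] > 0:
            # Agents with funds left schedule their next action.
            q.put((current_time + agent.get_next_wait_s(), i, agent))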
Example #7
        assert init_idx > 0
        x_init_data, y_init_data = x_train[:init_idx], y_train[:init_idx]
        x_remaining, y_remaining = x_train[init_idx:], y_train[init_idx:]

        # Split test set into pieces.
        num_pieces = 10
        test_dataset_hashes, test_sets = self._im.get_test_set_hashes(
            num_pieces, x_test, y_test)

        # Ending criteria:
        min_length_s = 1_000
        min_num_contributions = len(x_remaining)

        self._im.model.init_model(x_init_data, y_init_data)
        test_reveal_index = self._im.initialize_market(
            Msg(initializer_address, total_bounty), test_dataset_hashes,
            min_length_s, min_num_contributions)
        assert 0 <= test_reveal_index < len(test_dataset_hashes)
        self._im.reveal_init_test_set(test_sets[test_reveal_index])

        # Accuracy on hidden test set after training with all training data:
        baseline_accuracies = {
            100: 0.6210,
            200: 0.6173,
            1000: 0.7945,
            10000: 0.84692,
            20000: 0.8484,
        }

        # Start the simulation.
        self._s.simulate(