Example #1
    def __init__(self, data_shape):
        self.data_shape = data_shape
        self.discriminator = None
        self.generator = None
        self.adversarial = None

        self.define_gan()
        self.noisy_samples = NoiseMaker(generator=self.generator)

        self.performance_output_path = 'performance/temp/'
        if not os.path.exists(self.performance_output_path):
            os.makedirs(self.performance_output_path)
Example #2
    def __init__(self, data_shape):
        """
        Initialize SiDN-GAN
        """
        self.data_shape = data_shape
        self.discriminator = None
        self.generator = None
        self.adversarial = None

        self.define_gan()
        self.noise_maker = NoiseMaker(shape=self.data_shape, noise_type='s&p')

        self.performance_output_path = 'performance/siamese_dn_gan_' + str(
            datetime.now().date())
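
The NoiseMaker class itself isn't shown in these examples; below is a minimal sketch of what its salt-and-pepper ('s&p') noise might look like, assuming images scaled to [-1, 1] (as the rescaling in Example #6 suggests) and a hypothetical density parameter:

import numpy as np

def add_salt_and_pepper(real_samples, density=0.1):
    # Flip a random fraction of pixels to the extremes: "pepper" (min
    # value) and "salt" (max value), split evenly between the two.
    noisy = np.copy(real_samples)
    mask = np.random.random(size=noisy.shape)
    noisy[mask < density / 2] = -1.0       # pepper
    noisy[mask > 1 - density / 2] = 1.0    # salt
    return noisy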
Example #3
class SiameseDenoiseGAN:
    def __init__(self, data_shape):
        """
        Initialize SiDN-GAN
        """
        self.data_shape = data_shape
        self.discriminator = None
        self.generator = None
        self.adversarial = None

        self.define_gan()
        self.noise_maker = NoiseMaker(shape=self.data_shape, noise_type='s&p')

        self.performance_output_path = 'performance/siamese_dn_gan_' + str(
            datetime.now().date())

    def define_gan(self):
        """
        Build Models
            Generative model
            Discriminative model
            Adversarial model
        """
        self.generator = build_generator(input_shape=self.data_shape)
        self.discriminator = build_discriminator(input_shape=self.data_shape)

        self.adversarial = build_adversarial(
            generator_model=self.generator,
            discriminator_model=self.discriminator)

    def train(self, dataset, epochs=20, batch_size=64):
        """
        Train model
        """
        for e in range(epochs):
            print('Epoch: %3d/%d' % (e + 1, epochs))
            self.single_epoch(dataset, batch_size)
            self.performance(epoch=e, test_data=dataset.test_data)

    def single_epoch(self, dataset, batch_size):
        """
        Single iteration/epoch
            Iterate dataset as batch size
        """
        trained_samples = 0

        for realX, _ in dataset.iter_samples(batch_size):
            # Add noise to images and denoise them
            noisy = self.noise_maker.add_noise(real_samples=realX)
            fakeX = self.generator.predict(noisy)

            # Train discriminator for real vs fake, far samples
            Y = np.ones(shape=(len(realX), ))
            discriminator_loss_rf = self.discriminator.train_on_batch(
                [realX, fakeX], Y)

            # Train discriminator for fake vs noisy, close samples
            Y = np.zeros(shape=(len(realX), ))
            discriminator_loss_fn = self.discriminator.train_on_batch(
                [fakeX, noisy], Y)

            # Add noise to images
            noisy = self.noise_maker.add_noise(realX)
            act_real = np.zeros(shape=(len(noisy), ))

            # Train the adversarial model on denoised images labeled as "close
            # to real" (0 = close, matching the discriminator labels above)
            gan_loss = self.adversarial.train_on_batch([realX, noisy],
                                                       act_real)

            trained_samples = min(trained_samples + batch_size,
                                  dataset.sample_number)
            print(
                '     %5d/%d -> Discriminator Loss: [RvsF: %f, FvsN: %f], Gan Loss: %f'
                % (trained_samples, dataset.sample_number,
                   discriminator_loss_rf, discriminator_loss_fn, gan_loss))

    def performance(self, epoch, test_data):
        """
        Measure performance of model at each iteration
        """
        path = self.performance_output_path + '/epoch-%04d' % (epoch + 1)
        if not os.path.exists(path):
            os.makedirs(path)

        # Average SSIM index on test set
        mean_ssim(epoch, test_data, self.noise_maker, self.generator,
                  self.performance_output_path + '/result.txt')

        test_data = test_data[epoch * 100:(epoch + 1) * 100]

        # Generate denoised samples - add noise test images and denoise
        noisy = self.noise_maker.add_noise(real_samples=test_data)
        generated = self.generator.predict(noisy)

        # Save the generator model
        model_file = path + '/model_%04d.h5' % (epoch + 1)
        self.generator.save(model_file)

        # Save figures
        fig_file = path + '/plot_%04d' % (epoch + 1)
        measure_and_plot(original_images=test_data,
                         noisy_images=noisy,
                         generated_images=generated,
                         path=fig_file)

        print('>Saved model and figures to', path)
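
A usage sketch for the class above, borrowing the Dataset setup shown in Examples #5 and #8 (the dataset name and test sample count are copied from those examples):

dataset = Dataset(dataset='caltech256')
dataset.split_test_data(test_sample=2000)

gan = SiameseDenoiseGAN(data_shape=dataset.data_shape)
gan.train(dataset, epochs=20, batch_size=64)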
Example #4

import random  # used by the random.randint benchmark below

# BASE_ITERATIONS and time_operation come from the surrounding benchmark
# script; a hypothetical sketch of time_operation follows this example.

# Warm up
x = 0
for i in range(BASE_ITERATIONS):
    x += 1

op = "dirichlet.rvs() (scipy frozen distribution, size 9)"
import scipy.stats  # noqa
d = scipy.stats.dirichlet([.2] * 9)
with time_operation(op, BASE_ITERATIONS) as op:
    for i in range(op.num_iterations):
        d.rvs()

from noise_maker import NoiseMaker  # noqa
NOISE_MAKER = NoiseMaker(1000)
op = "NOISE_MAKER.make_noise(.2, 10)"
with time_operation(op, BASE_ITERATIONS) as op:
    for i in range(op.num_iterations):
        NOISE_MAKER.make_noise(.2, 10)

op = "random.randint(0, 9999)"
with time_operation(op, BASE_ITERATIONS) as op:
    for i in range(op.num_iterations):
        random.randint(0, 9999)

op = "[0.0 for x in agents]"
agents = [0, 1]
with time_operation(op, BASE_ITERATIONS) as op:
    for i in range(op.num_iterations):
        s = [0.0 for x in agents]
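
The time_operation helper the benchmark relies on isn't shown; here is one plausible sketch, assuming it carries a fixed iteration budget and reports timing on exit (the class, attribute, and argument names are guesses inferred from the calls above):

import time
from contextlib import contextmanager

class _TimedOp:
    def __init__(self, name, num_iterations):
        self.name = name
        self.num_iterations = num_iterations

@contextmanager
def time_operation(name, num_iterations):
    # Yield an object carrying the iteration budget, then print the
    # elapsed wall-clock time per iteration when the block exits.
    op = _TimedOp(name, num_iterations)
    start = time.perf_counter()
    yield op
    elapsed = time.perf_counter() - start
    print("%s: %.3f us/iter" % (name, elapsed / num_iterations * 1e6))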
Example #5
    generated = generator.predict(noisy)

    # save the generator model
    model_file = path + '/model_%04d.h5' % (epoch + 1)
    generator.save(model_file)

    fig_file = path + '/plot_%04d' % (epoch + 1)
    measure_and_plot(original_images=test_data,
                     noisy_images=noisy,
                     generated_images=generated,
                     path=fig_file)

    print('>Saved model and figures to', path)


if __name__ == '__main__':

    dataset = Dataset(dataset='caltech256')
    dataset.split_test_data(test_sample=2000)
    noise_maker = NoiseMaker(shape=dataset.data_shape, noise_type='s&p')

    model_folder = 'C:/PycharmProjects/NeuralNetworks-GAN/performance/caltech256-128x128-siamese_dn_gan_2019-12-21'

    for epoch in range(20):
        generator_path = model_folder + '/epoch-%04d' % (
            epoch + 1) + '/model_%04d.h5' % (epoch + 1)
        generator = load_model(generator_path)

        performance(generator, noise_maker, epoch, dataset.test_data,
                    model_folder)
Example #6
class SiameseGAN:
    def __init__(self, data_shape):
        self.data_shape = data_shape
        self.discriminator = None
        self.generator = None
        self.adversarial = None

        self.define_gan()
        self.noisy_samples = NoiseMaker(generator=self.generator)

        self.performance_output_path = 'performance/temp/'
        if not os.path.exists(self.performance_output_path):
            os.makedirs(self.performance_output_path)

    def define_gan(self):
        self.generator = build_generator(input_shape=self.data_shape)
        self.discriminator = build_discriminator(input_shape=self.data_shape)

        self.adversarial = build_adversarial(
            generator_model=self.generator,
            discriminator_model=self.discriminator)

    def train(self, dataset, epochs=100, batch_size=64):

        for e in range(epochs):
            print('Epoch: %3d/%d' % (e + 1, epochs))
            self.single_epoch(dataset, batch_size)
            self.performance(step=e, test_data=dataset.test_data)

    def single_epoch(self, dataset, batch_size):
        half_batch_size = int(batch_size / 2)
        trained_samples = 0

        for realX, _, realY in dataset.iter_samples(half_batch_size):
            fakeX, fakeY, noiseX = self.noisy_samples.denoise_samples(
                real_samples=realX)

            Y = np.zeros(shape=(len(realX), ))
            discriminator_loss = self.discriminator.train_on_batch(
                [realX, fakeX, noiseX], Y)

            noisy_input = self.noisy_samples.add_noise(realX)
            act_real = np.ones(shape=(len(noisy_input), ))

            gan_loss = self.adversarial.train_on_batch(
                [realX, noisy_input, noisy_input], act_real)

            trained_samples = min(trained_samples + half_batch_size,
                                  dataset.sample_number)
            print('     %5d/%d -> Discriminator Loss: %f, Gan Loss: %f' %
                  (trained_samples, dataset.sample_number, discriminator_loss,
                   gan_loss))

    def performance(self, step, test_data):
        # prepare fake examples
        generated, _, _ = self.noisy_samples.denoise_samples(
            real_samples=test_data)
        # scale from [-1,1] to [0,1]
        generated = (generated + 1) / 2.0
        # plot images
        for i in range(100):
            # define subplot
            pyplot.subplot(10, 10, 1 + i)
            # turn off axis
            pyplot.axis('off')
            # plot raw pixel data
            pyplot.imshow(generated[i, :, :, 0], cmap='gray_r')
        # save plot to file
        fig_file = self.performance_output_path + 'generated_plot_%04d.png' % (
            step + 1)
        pyplot.savefig(fig_file)
        pyplot.close()
        # save the generator model
        model_file = self.performance_output_path + 'model_%04d.h5' % (step +
                                                                       1)
        self.generator.save(model_file)
        print('>Saved: %s and %s' % (fig_file, model_file))
Example #7
class Img2ImgGAN:
    def __init__(self, data_shape):
        self.data_shape = data_shape
        self.discriminator = None
        self.generator = None
        self.adversarial = None

        self.define_gan()
        self.noisy_samples = NoiseMaker(generator=self.generator,
                                        shape=self.data_shape,
                                        noise_type='s&p')

        self.performance_output_path = 'performance/temp/'
        if not os.path.exists(self.performance_output_path):
            os.makedirs(self.performance_output_path)

    def define_gan(self):
        self.generator = build_generator(input_shape=self.data_shape)
        self.discriminator = build_discriminator(input_shape=self.data_shape)

        self.adversarial = build_adversarial(
            generator_model=self.generator,
            discriminator_model=self.discriminator)

    def train(self, dataset, epochs=100, batch_size=64):

        for e in range(epochs):
            print('Epoch: %3d/%d' % (e + 1, epochs))
            self.single_epoch(dataset, batch_size)
            self.performance(step=e, test_data=dataset.test_data)

    def single_epoch(self, dataset, batch_size):
        half_batch_size = int(batch_size / 2)
        trained_samples = 0

        for realX, _, realY in dataset.iter_samples(half_batch_size):
            fakeX, fakeY, _ = self.noisy_samples.denoise_samples(
                real_samples=realX)
            X = np.vstack([realX, fakeX])
            Y = np.hstack([realY, fakeY])

            discriminator_loss = self.discriminator.train_on_batch(X, Y)

            noisy_input = self.noisy_samples.add_noise(realX)
            act_real = np.ones(shape=(len(noisy_input), ))

            gan_loss = self.adversarial.train_on_batch(noisy_input, act_real)

            trained_samples = min(trained_samples + half_batch_size,
                                  dataset.sample_number)
            print('     %5d/%d -> Discriminator Loss: %f, Gan Loss: %f' %
                  (trained_samples, dataset.sample_number, discriminator_loss,
                   gan_loss))

    def performance(self, step, test_data):

        sub_test_data = test_data[step * 50:(step + 1) * 50]

        # prepare fake examples
        generated, _, noise = self.noisy_samples.denoise_samples(
            real_samples=sub_test_data)

        # save plot to file
        fig_file = self.performance_output_path + 'epoch-%04d_plot.png' % (
            step + 1)
        data_triplet = np.concatenate([sub_test_data, noise, generated],
                                      axis=2)
        plot_images(data_triplet, path=fig_file)

        # save the generator model
        model_file = self.performance_output_path + 'model_%04d.h5' % (step +
                                                                       1)
        self.generator.save(model_file)
        print('>Saved: %s and %s' % (fig_file, model_file))
Example #8
        pyplot.subplot(1, 4, 4)
        pyplot.axis('off')
        pyplot.imshow(original, interpolation='none')
        pyplot.text(x, y, '(d) Ground Truth', size=12, ha='center')

        img_path = path + '/image-%04d.png' % (i + 1)
        pyplot.savefig(img_path)
        pyplot.close()


if __name__ == '__main__':

    dataset = Dataset(dataset='caltech256')
    dataset.split_test_data(test_sample=2000)
    noise_maker = NoiseMaker(shape=dataset.data_shape, noise_type='s&p')

    # dataset_name = 'cifar10-32x32'
    # dataset_name = 'caltech256-64x64'
    dataset_name = 'caltech256-128x128'

    model_folder = 'C:/PycharmProjects/NeuralNetworks-GAN/performance/'

    path = model_folder + dataset_name + '_evaluate'
    if not os.path.exists(path):
        os.makedirs(path)

    best_dn_gan_path = model_folder + dataset_name + '-dn_gan_2019-12-24' + '/epoch-0018/model_0018.h5'
    best_sdn_gan_path = model_folder + dataset_name + '-siamese_dn_gan_2019-12-21' + '/epoch-0019/model_0019.h5'

    # generate fake examples
Example #9
    def __post_init__(self):
        super().__post_init__()
        self.noise_maker = NoiseMaker(1000)
        if self.policy_overrides is None:
            self.policy_overrides = [None, None]
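
Examples #4 and #9 construct NoiseMaker(1000) and call make_noise(alpha, size); the benchmark in Example #4 measures it against scipy's frozen dirichlet, which suggests it pre-computes Dirichlet draws for speed. Below is a sketch under that assumption (the constructor argument is taken to be a cache size; the cycling policy is a guess):

import numpy as np

class NoiseMaker:
    def __init__(self, cache_size):
        self.cache_size = cache_size
        self._caches = {}  # (alpha, size) -> [cursor, precomputed draws]

    def make_noise(self, alpha, size):
        # Draw a batch of Dirichlet samples once per (alpha, size) pair,
        # then cycle through the cached batch on subsequent calls.
        key = (alpha, size)
        if key not in self._caches:
            draws = np.random.dirichlet([alpha] * size, self.cache_size)
            self._caches[key] = [0, draws]
        entry = self._caches[key]
        entry[0] = (entry[0] + 1) % self.cache_size
        return entry[1][entry[0]]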
Example #10
class MCTSAgent(Agent):
    game_tree: Any
    current_node: Any
    feature_extractor: Any
    value_model: Any
    policy_model: Any
    move_consideration_time: float
    puct_explore_factor: float
    puct_noise_alpha: float
    puct_noise_influence: float
    full_search_proportion: float
    full_search_steps: float
    partial_search_steps: float
    temperature: float = 0.0
    require_full_steps: bool = True
    revisit_violated_expectations: bool = False
    policy_overrides: List[Dict] = None  # [agent_0_overrides, ...]

    def __post_init__(self):
        super().__post_init__()
        self.noise_maker = NoiseMaker(1000)
        if self.policy_overrides is None:
            self.policy_overrides = [None, None]

    def set_agent_num(self, agent_num):
        super().set_agent_num(agent_num)

    def setup(self, initial_state):
        node = self.add_node(state=initial_state, parent_edge=None)
        self.game_tree = node
        self.current_node = node

    def handle_move(self, move, resulting_state):
        # :resulting_state is the observable state this agent can see.

        # Move game tree pointer
        # - If the move leads to an unexpanded state then add node. This can happen if
        #   opponent makes move first or a move this agent has never considered.
        edge_representing_move = self.current_node.get_child_edge(move)

        if edge_representing_move.child_node is None:
            # XXX: Does this affect mcts considerations?
            self.add_node(state=resulting_state,
                          parent_edge=edge_representing_move)

        self.current_node = edge_representing_move.child_node
        assert self.current_node is not None, "Current node must always exist"

        self.prune_game_search_tree()

    def prune_game_search_tree(self):
        '''
        Remove all the considered but unrealized nodes/edges upstream of the
        current node in the game tree.  These are the nodes that MCTS
        considered, but the actions the players took didn't manifest in those
        tree pathways.  The stats of those pathways might be useful, but it's a
        lot of memory to keep around.

        The child edges of nodes that were visited ARE RETAINED because they contain
        the statistics (node visits) used to train the policy model.

        IF YOU DO NOT DO THIS THEN YOUR MEMORY USAGE WILL EXPLODE! Some games
        can have 1000s of moves and branching factors of 100s or more.
        '''
        # I am root. Ignore.
        if self.current_node.parent_edge is None:
            return

        # Go up the tree one move and prune all the downtree nodes except the
        # one the game ended up moving into.
        # - Set the pruned children to None as well - this prevents errors from
        #   trying to do things like print them after they've been deleted
        #   (because Python would otherwise still think there's a dataclass
        #   there, for e.g.).
        parent_node = self.current_node.parent_edge.parent_node
        for child_edge in parent_node.child_edges:
            if child_edge.child_node == self.current_node:
                continue
            del child_edge.child_node
            child_edge.child_node = None

    def add_node(self, state, parent_edge):
        '''
        Create game search tree node, link to parent edge.
        '''
        # Detect terminal once for each state and stash it instead of recalculating each
        # time you do mcts considerations.
        is_terminal = self.environment.is_terminal(state)

        # Calculate allowable_actions, agent values, agent policies
        if is_terminal:
            allowable_actions = []
            values = self.environment.rewards(
                state)  # XXX: Enforce being tuple?
        else:
            allowable_actions = self.environment.enumerate_actions(state)

            agent_features = self.feature_extractor(state,
                                                    self.environment.agents)
            values = self.value_model.predict(agent_features)

            # Only the policy of the agent that is moving at this position is needed.
            agent_policy = self.policy_model.predict(
                agent_features[state.whose_move], allowable_actions)

        node = GameTreeNode(
            state=state,
            parent_edge=parent_edge,
            child_edges=[],
            values=values,
            visit_count=0,
            is_terminal=is_terminal,
        )

        # Link to parent edge
        # - Except when it's the root node which has no parent edge
        if parent_edge is not None:
            parent_edge.child_node = node

        # Initialize edges
        for i, move in enumerate(allowable_actions):
            node.child_edges.append(
                GameTreeEdge(
                    parent_node=node,
                    child_node=None,
                    move=move,
                    prior_probability=agent_policy[i],
                    visit_count=0,
                    reward_totals=[0.0] * len(self.environment.agents),
                ))

        return node

    def puct(
        self,
        node,
        explore_factor=1.0,
        noise_alpha=1.0,
        noise_influence=0.25,
    ):
        # Get visit count of state
        # XXX: is node visit count (sum edge visits or does an expansion count as 1)
        # - What does a "node visit" mean?
        total_node_visits = node.visit_count

        # Ensure total_node_visits isn't 0
        # - If this is the first visit, then the puct exploitation_term and
        #   exploration_term would both be zero without this adjustment. In which
        #   case, instead of choosing based on prior for that first visit, it
        #   would choose randomly among all actions.  Ensuring this is at least 1
        #   will allow the noisy prior to bias the choice.
        if total_node_visits < 1:
            total_node_visits = 1

        # Generate noise for prior probabilities to encourage MCTS exploration
        # - The :noise_alpha determines the type and strength of the
        #   variability. A value below 1.0 will concentrate the variability on
        #   one of the N moves. A value of 1.0 will make the noise uniform
        #   across all moves. As the value goes to infinity the noise becomes
        #   ineffective. Chess, shogi, and go had (average_moves, alpha) values
        #   of [(35, .2), (92, .15), (250, .03)] respectively.
        # - Note that this noise is added for every node in this implementation,
        #   but I believe AZ did it just for the root consideration node.
        # noise = numpy.random.dirichlet([noise_alpha] * len(node.child_edges))
        noise = self.noise_maker.make_noise(noise_alpha, len(node.child_edges))

        # Get highest edge value
        sqrt_total_node_visits = math.sqrt(total_node_visits)
        best_value = 0.0
        best_edge = None
        agent_moving = node.state.whose_move
        policy_overrides = self.policy_overrides[node.state.whose_move]
        for i, child_edge in enumerate(node.child_edges):
            # XXX: Correct behavior for 0 visits?
            # - Seems like it should be 0 and have the policy's prior affect the
            #   choice instead when there is no evidence.
            exploitation_term = 0.0
            if child_edge.visit_count > 0:
                exploitation_term = child_edge.reward_totals[
                    agent_moving] / child_edge.visit_count

            if policy_overrides:
                prior = policy_overrides.get(child_edge.move,
                                             child_edge.prior_probability)
            else:
                prior = child_edge.prior_probability

            noisy_prior = (prior * (1 - noise_influence)) + (noise[i] *
                                                             noise_influence)
            exploration_term = explore_factor * noisy_prior
            exploration_term = exploration_term * (
                sqrt_total_node_visits / (1 + child_edge.visit_count))

            puct_value = exploitation_term + exploration_term

            if settings.VERBOSITY >= 5:
                print(
                    f"puct edge ({hex(hash(str(node.state)))[-6:]})",
                    f"move:{child_edge.move}",
                    f"N(s):{total_node_visits}",
                    f"N(s, a):{child_edge.visit_count}",
                    f"W(s, a):{child_edge.reward_totals[agent_moving]}",
                    f"prior:{round(child_edge.prior_probability, 4)}",
                    f"noise:{round(noise[i], 4)}",
                    f"exploit:{round(exploitation_term, 3)}",
                    f"explore:{round(exploration_term, 3)}",
                )
            # No tie-breaker needed because of noise.
            if (puct_value >= best_value) or (best_edge is None):
                best_value = puct_value
                best_edge = child_edge

        return best_edge
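
    # For reference, puct() above maximizes the AlphaZero-style PUCT score
    # with Dirichlet-noised priors:
    #
    #   PUCT(s, a) = W(s,a)/N(s,a)
    #                + explore_factor * [(1 - eps)*P(s,a) + eps*eta_a]
    #                  * sqrt(N(s)) / (1 + N(s,a))
    #
    # where eta ~ Dir(noise_alpha), eps = noise_influence, and the
    # exploitation term W/N is taken as 0 for unvisited edges.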

    def run_mcts_considerations(
        self,
        num_steps,
        max_seconds,
        puct_explore_factor,
        puct_noise_alpha,
        puct_noise_influence,
    ):
        # Give the agent up to :num_steps and :max_seconds
        steps = 0
        st_time = time.time()
        while (steps < num_steps) or (time.time() - st_time) < max_seconds:
            steps += 1

            # Select / Expand
            # - For a given node, select next action from current state via puct. If resulting state
            # hasn't been explored before, then create it (with dangling edges).  Stop when you've
            # created a new state (i.e., an expansion occurred) or are at a terminal state.
            node_exploring = self.current_node
            expansion_occurred = False
            while True:
                if node_exploring.is_terminal or expansion_occurred:
                    break
                best_edge = self.puct(
                    node_exploring,
                    puct_explore_factor,
                    puct_noise_alpha,
                    puct_noise_influence,
                )

                # Expand if novel action
                if best_edge.child_node is None:
                    resulting_state = self.environment.transition_state(
                        node_exploring.state, best_edge.move)
                    self.add_node(state=resulting_state, parent_edge=best_edge)
                    expansion_occurred = True
                node_exploring = best_edge.child_node
            node_selected = node_exploring

            # Rollouts
            # - just kidding!

            # Backup
            # - Reward is the selected node's state value unless it's a terminal node that has an
            #   objective reward.
            rewards = node_selected.values

            backing_node = node_selected
            while backing_node is not self.current_node:
                # XXX: Do you backup past current position?
                # XXX: Do you increment visit here or before?
                backing_node.parent_edge.visit_count += 1
                for i, val in enumerate(
                        backing_node.parent_edge.reward_totals):
                    backing_node.parent_edge.reward_totals[
                        i] = val + rewards[i]
                backing_node = backing_node.parent_edge.parent_node

                # This is the parent of the edges whose visit_counts were incremented above.
                # Increment the node's visit_count (the sum of all visits to its edges) so the
                # puct calculation doesn't have to do a first pass just to compute it.
                # XXX: Double check this is right. assert same.  Is it adding too many visits to
                #      root consideration node?
                backing_node.visit_count += 1

        if settings.VERBOSITY >= 1:
            steps_per_sec = round(steps / (time.time() - st_time), 1)
            print("MCTS", steps, "considerations,", steps_per_sec, "per sec")

    def display_best_moves(self):
        def build_table(child_edges):
            ttext = "{:<8}{:<8}{:<8}{:<8}".format("MOVE", "VISITS", "PRIOR",
                                                  "P(WIN)\n")
            for child_edge in child_edges:
                p_win = None if not child_edge.visit_count else round(
                    child_edge.reward_totals[self.agent_num] /
                    child_edge.visit_count, 3)
                color = "white"
                if (p_win or 0) > 0:
                    color = "green"
                elif (p_win or 0) < 0:
                    color = "red"
                prior = round(float(child_edge.prior_probability), 3)
                ttext += "{:<8}{:<8}{:<8}[{}]{:<8}[/{}]\n".format(
                    self.environment.action_name_by_id[child_edge.move],
                    child_edge.visit_count,
                    prior,
                    color,
                    (p_win or "-"),
                    color,
                )
            return ttext

        most_visited_edges = [
            (ce.visit_count, i, ce)
            for i, ce in enumerate(self.current_node.child_edges)
        ]  # (num_visits, index, edge); the index breaks ties without comparing edges
        most_visited_edges.sort(reverse=True)
        most_visited_edges = [x[2] for x in most_visited_edges]

        tables = []
        tables.append(build_table(most_visited_edges[:10]))
        if len(most_visited_edges) > 10:
            tables.append(build_table(most_visited_edges[10:20]))
        row_1 = stitch_text_blocks(tables, "        ")
        rprint(row_1)

    def get_current_temperature(self):
        '''
        From Alpha Go Zero Paper (https://doi.org/10.1038/nature24270):

            Evaluation (and tournament play probably)
                "...using an infinitesimal temperature τ→ 0 (that is, we
                deterministically select the move with maximum visit count, to give
                the strongest possible play)..."

            Self Play
                "For the first 30 moves of each game, the temperature is set to
                τ = 1; this selects moves proportionally to their visit count in
                MCTS, and ensures a diverse set of positions are encountered.
                For the remainder of the game, an infinitesimal temperature is
                used, τ→ 0. Additional exploration is achieved by adding
                Dirichlet noise to the prior probabilities in the root node s0,
                specifically P(s, a) = (1 − ε)pa + εηa, where η ∼ Dir(0.03) and
                ε = 0.25; this noise ensures that all moves may be tried, but
                the search may still overrule bad moves."
        '''
        # XXX: Adapt to number of moves.
        # XXX: Adjust for tournament play vs self play
        return self.temperature

    def select_move(self):
        child_edges = self.current_node.child_edges
        temperature = self.get_current_temperature()

        # Adjust infinitesimal temperatures
        # - This will make it so small it shouldn't matter
        if (temperature is None) or (temperature == 0.0):
            temperature = .05

        temp_factor = (1.0 / temperature)

        # Pre-calculate denominator for temperature adjustment
        sum_adjusted_visits = 0.0
        for child_edge in child_edges:
            sum_adjusted_visits += child_edge.visit_count**temp_factor

        # Build a weight for each edge
        move_weights = [0.0] * len(child_edges)
        for i, child_edge in enumerate(child_edges):
            move_weights[i] = (child_edge.visit_count**
                               temp_factor) / sum_adjusted_visits

        # Select proportional to temperature-adjusted visits
        # - "p" is temperature-adjusted probabilities associated with each edge
        selected_edge = numpy.random.choice(
            child_edges,
            size=1,
            replace=True,
            p=move_weights,
        )[0]
        if settings.VERBOSITY >= 2:
            self.display_best_moves()
        return selected_edge.move
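
    # A quick worked example of the temperature adjustment in select_move():
    # with visit counts (10, 30, 60) and temperature 1.0 (temp_factor = 1) the
    # move weights are simply 0.1, 0.3, 0.6. With the "infinitesimal"
    # temperature of 0.05 (temp_factor = 20), 60**20 dwarfs the other terms,
    # so the most-visited move's weight rounds to 1.0, reproducing
    # deterministic max-visit selection.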

    def make_move(self):
        # Playout Cap Randomization
        consideration_steps = self.full_search_steps
        if random.random() > self.full_search_proportion:
            consideration_steps = self.partial_search_steps

        self.run_mcts_considerations(
            consideration_steps,
            self.move_consideration_time,
            puct_explore_factor=self.puct_explore_factor,
            puct_noise_alpha=self.puct_noise_alpha,
            puct_noise_influence=self.puct_noise_influence,
        )
        return self.select_move()

    def iter_game_tree_positions(self):
        '''Walk the game tree as if you were replaying the game move by move'''
        node = self.game_tree
        for _, move in self.environment.event_history:
            yield (node, move)
            if move is None:
                break
            node = node.get_child_edge(move).child_node

    def record_replay(self, output_dir):
        replay = AgentReplay.from_agent(self)

        # Write replay
        # - mkdir -p replay path if it doesn't exist.
        game_id = self.environment.id
        agent_num = self.agent_num
        output_path = f"{output_dir}/{game_id}-{agent_num}.json"
        full_path_mkdir_p(output_path)
        with open(output_path, 'w') as fout:
            fout.write(json.dumps(replay.marshall()))
        if settings.VERBOSITY >= 2:
            print("Saved replay:", output_path)
        return output_path, replay
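
The GameTreeNode and GameTreeEdge constructors used in add_node imply roughly the following dataclasses (a sketch: only the fields that appear above are included, and get_child_edge is inferred from its use in handle_move and iter_game_tree_positions):

from dataclasses import dataclass
from typing import Any, List

@dataclass
class GameTreeNode:
    state: Any
    parent_edge: Any        # GameTreeEdge, or None for the root
    child_edges: List[Any]
    values: Any             # per-agent value estimates or terminal rewards
    visit_count: int
    is_terminal: bool

    def get_child_edge(self, move):
        # Find the outgoing edge matching :move.
        for edge in self.child_edges:
            if edge.move == move:
                return edge

@dataclass
class GameTreeEdge:
    parent_node: Any
    child_node: Any         # None until the resulting state is expanded
    move: Any
    prior_probability: float
    visit_count: int
    reward_totals: List[float]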