Example #1
0
class TestTriangular(unittest.TestCase):

    def setUp(self):
        # Fresh TriangularNumbers instance with a random size for each test.
        size = random.randint(1, 20)
        self.triangular = TriangularNumbers(size)

    def test_base_case(self):
        """The dimension-0 row should simply be 1..array_size."""
        expected = list(range(1, self.triangular.array_size + 1))
        self.assertEqual(self.triangular.number_array[0], expected)

    def test_dimension_one(self):
        """A random first-dimension entry matches the closed form n*(n+1)/2."""
        i = random.randint(0, self.triangular.array_size - 1)
        computed = self.triangular.calculate_numbers(1)
        # closed-form equation for the i-th (1-based) triangular number
        closed_form = (i + 1) * (i + 2) // 2
        self.assertEqual(computed[i], closed_form)
Example #2
0
 def setUp(self):
     """Create a TriangularNumbers instance of random size before each test."""
     size = random.randint(1, 20)
     self.triangular = TriangularNumbers(size)
Example #3
0
    def run_backoff_search(self, jump_number):
        """ Run a "backoff" search and calculate reward.

            The basic idea is to minimize the worst-case number of steps
            needed to find the decision boundary, by reducing the jump
            interval as more steps are taken. The simplest approach to this
            is using triangular numbers. Credit for intuition/details to
            datagenetics.com/blog/july22012/index.html .

            Args:
                jump_number: dimension of the triangular numbers used for
                    the initial jump sizes; decremented each time a jump
                    overshoots the boundary.

            Returns:
                The accumulated reward (int) over self.num_trials guesses.
        """

        self.reward = 0
        boundary_found = False
        current_max = self.array_size
        current_min = 0

        # this should be a decent approximation of the largest
        # triangular number necessary, since they grow as a factor
        # of (array_position) ** 2
        tri = TriangularNumbers(2 * int(self.array_size ** 0.5))
        number_list = tri.calculate_numbers(jump_number)
        last_list = tri.calculate_numbers(jump_number - 1)

        # get the smallest index x such that the x-th k-dimensional triangular
        # number is greater than array_size
        current_index = next(i for i, val in enumerate(number_list)
                               if val > self.array_size)

        # for each iteration:
        #    - starting from the 0th index, jump up last_list[current_index]
        #      indices
        #    - if this is equal to decision_boundary, we're done
        #    - if this is less than decision_boundary, decrement current_index
        #      by one and iterate again
        #    - if this is greater than decision_boundary, the boundary is
        #      somewhere between the last index and the current one, so:
        #         -> set number_list to last_list, and last_list to one less
        #            than the previous last_list
        #         -> run the backoff search recursively on the current
        #            partition (formed by the last and current index)
        # NOTE(review): `xrange` is Python 2-only; replaced with `range`,
        # which iterates identically and keeps this consistent with the
        # Python-3-compatible code elsewhere in this file.
        for _ in range(self.num_trials):
            current_value = current_min + last_list[current_index]

            if current_value == self.decision_boundary:
                # if we've found the boundary, we guess that and continue
                # iterating
                self.reward += self.decision_boundary

            elif current_value < self.decision_boundary:
                self.reward += current_value
                # this allows us to keep shrinking the search space
                current_min = current_value
                current_index -= 1

            else:
                # the current value in last_list is above the decision boundary
                # no reward, since we guessed too high
                number_list = last_list
                jump_number -= 1
                if jump_number == 0:
                    # dimension 0 degenerates to unit steps
                    last_list = [1] * self.array_size
                else:
                    last_list = tri.calculate_numbers(jump_number - 1)

                # rescale to the new interval
                current_index = next(i for i, val in enumerate(number_list)
                                     if val > (current_value - current_min))

        return self.reward