def test_naive_nbest(self):
    """Naive N-best: stream values through a heap capped at ten entries."""
    values = [random.randrange(2000) for _ in xrange(1000)]
    heap = self.makeHeap()
    for v in values:
        heap.push(v)
        # Evict the smallest so only the ten largest survive.
        while len(heap) > 10:
            heap.popmin()
    assert list(heap.iterpop()) == self.sorted(values)[-10:]
def test_extend(self):
    """extend() must preserve the heap invariant after every batch added."""
    data = [random.randrange(200) for _ in xrange(100)]
    heap = self.makeHeap(data)
    for _ in xrange(10):
        batch = [random.randrange(200) for _ in xrange(random.randrange(20))]
        data += batch
        heap.extend(batch)
        self.assert_heap_invariant(heap)
    assert list(heap.iterpop()) == self.sorted(data)
def test_delitem(self):
    """Popping arbitrary positions must keep the heap well-formed."""
    data = [random.randrange(200) for _ in xrange(100)]
    heap = self.makeHeap(data)
    while len(heap):
        # Remove a random slot and mirror the removal in the reference list.
        victim = heap.pop(random.randrange(len(heap)))
        data.remove(victim)
        self.assert_heap_invariant(heap)
    assert list(heap.iterpop()) == self.sorted(data)
def test_setitem(self):
    """Overwriting heap slots must restore the invariant each time."""
    data = [random.randrange(200) for _ in xrange(100)]
    heap = self.makeHeap(data)
    # Pick all 100 target slots up front, then perturb each in turn.
    for index in [random.randrange(len(heap)) for _ in xrange(100)]:
        old = heap[index]
        new = old + random.randrange(-10, 11)
        data.remove(old)
        data.append(new)
        heap[index] = new
        self.assert_heap_invariant(heap)
    assert list(heap.iterpop()) == self.sorted(data)
def test_nbest(self):
    # Smarter "N-best": seed the heap with the first ten values, then
    # replace the current minimum only when a larger value shows up.
    # Much faster than sorting all of data when len(data) is big.
    # (With a max heap we could instead heapify all of data in linear
    # time and finish with ten log-time pops.)
    data = [random.randrange(2000) for _ in xrange(1000)]
    heap = self.makeHeap(data[:10])
    key = self.key if self.key else (lambda x: x)
    for item in data[10:]:
        # Replacement gets rarer as the running minimum climbs.
        if key(item) > key(heap[0]):
            heap.replace(item)
    assert list(heap.iterpop()) == self.sorted(data)[-10:]
def test_iter(self):
    """Plain iteration visits the same elements that iterpop drains."""
    heap = self.makeHeap([random.randrange(200) for _ in xrange(100)])
    via_iter = self.sorted(list(heap))
    via_pop = list(heap.iterpop())
    assert via_iter == via_pop