def test_eq():
    this = SortedList(range(10), load=4)
    that = SortedList(range(20), load=4)
    assert not (this == that)
    that.clear()
    that.update(range(10))
    assert this == that
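Note: the load= keyword above is sortedcontainers 1.x API. In 2.x the load factor is set after construction with _reset(), as the later test_init variant does; a minimal sketch of the equivalent setup:

from sortedcontainers import SortedList

this = SortedList(range(10))
this._reset(4)  # 2.x replacement for SortedList(range(10), load=4)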
Example #2
class OffsetMapper:
    def __init__(self):
        self.ref_points = SortedList()
        self.adjustments = []

    def set_ref_points(self, p: int, adjustment: int):
        # assumes points arrive in increasing order of p, so that
        # adjustments stays aligned with the sorted ref_points
        self.ref_points.add(p)
        self.adjustments.append(adjustment)

    def trans_offset(self, origin: int) -> int:
        """
        Translate the offset based on the reference points. Offsets past the
        last reference point fall back to the last adjustment.

        Args:
            origin: The original offset

        Returns:
            The translated offset.
        """
        idx = self.ref_points.bisect_left(origin)
        if idx >= len(self.adjustments):
            return origin + self.adjustments[-1]
        return origin + self.adjustments[idx]

    def clear(self):
        self.ref_points.clear()
        self.adjustments.clear()
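A short usage sketch for OffsetMapper above; the reference points and adjustments are made up for illustration:

mapper = OffsetMapper()
mapper.set_ref_points(0, 0)
mapper.set_ref_points(10, 5)
print(mapper.trans_offset(0))   # 0   (bisect_left -> 0, adjustment 0)
print(mapper.trans_offset(10))  # 15  (bisect_left -> 1, adjustment 5)
print(mapper.trans_offset(99))  # 104 (past the last point -> last adjustment)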
Example #3
class LevelMetadata:
    def __init__(self):
        self.uuids = list()
        self.first_indices = SortedList()

    def __len__(self):
        return len(self.uuids)

    def insert(self, uuid, first_index):
        idx = self.first_indices.bisect_left(first_index)
        self.first_indices.add(first_index)
        self.uuids.insert(idx, uuid)

    def clear(self, i=None, j=None):
        if i is None and j is None:
            self.uuids.clear()
            self.first_indices.clear()
        else:
            del self.uuids[i:j]
            del self.first_indices[i:j]

    def get_uuid(self, item):
        idx = self.first_indices.bisect_right(item) - 1
        if idx < 0:
            return None
        return self.uuids[idx]
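LevelMetadata maps each sorted run's first key to its uuid, and get_uuid() uses bisect_right to pick the run whose range could contain an item. A quick sketch with made-up run ids:

meta = LevelMetadata()
meta.insert('run-a', 0)     # run whose smallest key is 0
meta.insert('run-b', 100)   # run whose smallest key is 100
print(meta.get_uuid(50))    # 'run-a' (bisect_right(50) - 1 == 0)
print(meta.get_uuid(100))   # 'run-b'
print(meta.get_uuid(-1))    # None   (precedes every run)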
Example #4
def _test5():
    """
    Docs: http://www.grantjenks.com/docs/sortedcontainers/sortedlist.html
    """
    from sortedcontainers import SortedList
    # Construction
    sl = SortedList(key=lambda x: -x)  # descending order
    sl = SortedList([3, 1, 2, 1, 5, 4])  # ascending order
    print(sl)  # SortedList([1, 1, 2, 3, 4, 5])
    # Insert and remove elements
    sl.add(3)
    sl.add(3)
    sl.discard(2)  # SortedList([1, 1, 3, 3, 3, 4, 5])
    print(sl)
    # Count occurrences of a value
    print(sl.count(3))  # 3
    # First and last elements
    print(sl[0])  # 1
    print(sl[-1])  # 5
    # Iterate over the list
    for e in sl:
        print(e, end=", ")  # 1, 1, 3, 3, 3, 4, 5,
    print()
    # Membership test
    print(2 in sl)  # False
    # bisect_left() / bisect_right()
    print(sl.bisect_left(3))  # index of the first element >= 3  ->  2
    print(sl.bisect_right(3))  # index of the first element > 3  ->  5
    # Clear
    sl.clear()
    print(len(sl))  # 0
    print(len(sl) == 0)  # True
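The linked docs also cover irange() for value-range scans, which complements the bisect calls above; a brief sketch:

from sortedcontainers import SortedList

sl = SortedList([1, 1, 2, 3, 4, 5])
print(list(sl.irange(2, 4)))                 # [2, 3, 4] (bounds inclusive by default)
print(list(sl.irange(2, 4, (True, False))))  # [2, 3]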
Example #6
def test_init():
    slt = SortedList()
    slt._check()

    slt = SortedList(load=10000)
    assert slt._load == 10000
    assert slt._twice == 20000
    assert slt._half == 5000
    slt._check()

    slt = SortedList(range(10000))
    assert all(tup[0] == tup[1] for tup in zip(slt, range(10000)))

    slt.clear()
    assert slt._len == 0
    assert slt._maxes == []
    assert slt._lists == []
    slt._check()
def test_init():
    slt = SortedList()
    assert slt.key is None
    slt._check()

    slt = SortedList()
    slt._reset(10000)
    assert slt._load == 10000
    slt._check()

    slt = SortedList(range(10000))
    assert all(tup[0] == tup[1] for tup in zip(slt, range(10000)))

    slt.clear()
    assert slt._len == 0
    assert slt._maxes == []
    assert slt._lists == []
    slt._check()
Example #10
class FastSplitter2d:
    def __init__(self, max_size=5000, chunk_count=5):
        self.max_size = max_size
        self.max_x = 0
        self.points = SortedList(key=lambda p: -p[1])
        self.chunk_count = chunk_count

    def add_to_pack(self, p):
        self.max_x = max(self.max_x, p[0])
        new_pos = self.points.bisect_right(p)
        # SortedList.insert() is sortedcontainers 1.x API; in 2.x use
        # self.points.add(p), which inserts at the same sorted position.
        self.points.insert(new_pos, p)

        offset = 0
        bs_vec = []
        while offset < len(self.points):
            bs = self.max_size // (self.max_x + self.points[offset][1])
            bs = min(len(self.points) - offset, bs)
            bs_vec.append(bs)
            offset += bs

        return new_pos, bs_vec

    def make_chunk_gen(self, points):
        prev_bs_vec = [0]
        for p in sorted(list(points), key=lambda p: p[0], reverse=True):
            new_pos, bs_vec = self.add_to_pack(p)

            if len(bs_vec) > len(prev_bs_vec):
                if len(prev_bs_vec) >= self.chunk_count:
                    self.points.pop(new_pos)
                    offset = 0
                    for sz in prev_bs_vec:
                        yield self.points[offset:offset + sz]
                        offset += sz
                    self.points.clear()
                    self.points.add(p)
                    prev_bs_vec = [1]
                    self.max_x = p[0]
            prev_bs_vec = bs_vec
        offset = 0
        for sz in prev_bs_vec:
            yield self.points[offset:offset + sz]
            offset += sz
Example #11
class WeightedAStar:
    def __init__(self, w):
        """
        Constructor of the WA* algorithm instance
        :param w: W of the WA* f(n)
        """
        self.w = w
        self.open = SortedList(key=lambda x: x.g + self.w * x.calc_h()
                               )  # priority queue based on f(n)
        self.closed = set()

    def restart(self):
        """
        Restarts the open and closed lists.
        """
        self.open.clear()
        self.closed.clear()

    def search(self, start_node):
        """
        Searches from the start node provided.
        :param start_node: Initial state of the search space.
         Node should support the following interface: expand(), calc_h() and is_goal()
        :return: The goal node and the number of nodes expanded.
        """
        self.restart()
        expanded = 0
        self.open.add(start_node)
        while len(self.open) != 0:
            node = self.open.pop(0)
            if node.is_goal():
                return node, expanded
            frontier = node.expand()
            expanded += 1
            for neighbour in frontier:
                if neighbour not in self.closed:
                    self.open.add(neighbour)
            self.closed.add(node)
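Per the search() docstring, nodes must provide expand(), calc_h() and is_goal(), and the closed set additionally needs them hashable. A hypothetical node type satisfying that contract, on a toy 1-D space where the goal is position 0:

class LineNode:
    def __init__(self, pos, g=0):
        self.pos = pos
        self.g = g  # path cost so far, read by the open-list key

    def calc_h(self):
        return abs(self.pos)  # distance-to-goal heuristic

    def is_goal(self):
        return self.pos == 0

    def expand(self):
        return [LineNode(self.pos - 1, self.g + 1),
                LineNode(self.pos + 1, self.g + 1)]

    # equality/hash by state so the closed set deduplicates positions
    def __eq__(self, other):
        return self.pos == other.pos

    def __hash__(self):
        return hash(self.pos)

goal, expanded = WeightedAStar(w=1.5).search(LineNode(3))
print(goal.pos, expanded)  # 0 3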
    def test_SortedList(self):
        # construct
        sorted_list = SortedList([1, 2, 3, 4])
        sorted_list = SortedList()

        # add
        for i in range(5, 0, -1):
            sorted_list.add(i)

        # adding elements using the update() function
        elements = [10, 9, 8, 7, 6]
        sorted_list.update(elements)

        # prints the updated list in sorted order
        print('list after updating: ', sorted_list)

        # removing a particular element using value
        sorted_list.discard(8)
        # removing all elements
        sorted_list.clear()
        print('list after removing all elements using clear: ', sorted_list)

        return
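One detail the snippet above glosses over: discard() is silent for missing values, while remove() raises ValueError:

from sortedcontainers import SortedList

sl = SortedList([1, 2, 3])
sl.discard(99)     # no-op; 99 is absent
try:
    sl.remove(99)  # raises ValueError for a missing value
except ValueError:
    print('99 not present')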
class BeliefBase:
    def __init__(self):
        # Sort beliefs in descending order with respect to their rank value
        self.beliefs = SortedList(key=lambda b: -b.rank)

    def instantiate(self):
        """ Instantiate the Belief Base with some predetermined beliefs """
        self.reset()
        self.expand(pl("(p & q) >> r"), 100)
        self.expand(pl("r"), 10)
        self.expand(pl("p"), 20)
        # self.expand(pl("q"), 30)

    def reset(self):
        """ Sets the belief base to be the empty set Ø """
        self.beliefs.clear()

    def __repr__(self):
        if len(self.beliefs) == 0:
            return "BeliefBase(Ø)"
        return 'BeliefBase([\n  {}\n])'.format(",\n  ".join(str(x) for x in self.beliefs))

    def rank(self, formula):
        formula = to_cnf(formula)
        bb = true
        r = self.beliefs[0].rank if self.beliefs else 0
        for belief in self.beliefs:
            if belief.rank < r:
                if entails(bb, formula):
                    return r
                r = belief.rank
            bb = bb & to_cnf(belief.formula)
        return r if entails(bb, formula) else 0
    
    def expand(self, formula, newrank):
        if self.rank(Not(formula)) > 0:
            print(">>> Formula is inconsistent with belief basis")
            return
        oldrank = self.rank(formula)
        if newrank <= oldrank:
            print(">>> Desired rank is lower than or equal to existing rank")
            return

        beliefs = self.beliefs.copy() # work around shenanigans that happen when deleting from a SortedList you're iterating over
        for belief in beliefs:
            if formula == belief.formula:
                self.beliefs.remove(belief)
        self.beliefs.add(Belief(formula, newrank))
        print(f">>> {formula} added to belief basis with rank {newrank}")

        beliefs = self.beliefs.copy()
        for belief in beliefs:
            if oldrank <= belief.rank <= newrank:
                bb = [to_cnf(x.formula) for x in filter(lambda x: x.rank >= belief.rank and x != belief, self.beliefs)]
                bb = reduce(lambda x, y: x & y, bb, true)
                if entails(bb, belief.formula):
                    print(f">>> Removed {belief} as it is redundant")
                    self.beliefs.remove(belief)
    
    def contract(self, formula):
        if entails(true, formula):
            print(f">>> {formula} is a tautology")
            return

        oldrank = self.rank(formula)
        delta = BeliefBase()
        delta.beliefs = self.beliefs.copy()
        for belief in self.beliefs:
            if belief.rank <= oldrank:
                bb = [to_cnf(x.formula) for x in filter(lambda x: x.rank >= (oldrank + 1), delta.beliefs)]
                bb = reduce(lambda x, y: x & y, bb, true)
                if not entails(bb, formula | belief.formula):
                    r = delta.rank(belief.formula)
                    delta.beliefs.remove(belief)
                    print(f">>> {belief} removed by (C-) condition")
                    if r < oldrank or not entails(bb, formula >> belief.formula):
                        for b in self.beliefs:
                            if formula >> belief.formula == b.formula:
                                delta.beliefs.remove(b)
                        t = Belief(formula >> belief.formula, r)
                        delta.beliefs.add(t)
                        print(f">>> Added {t} to belief basis to satisfy (K-5)")
        self.beliefs = delta.beliefs

    def revision(self, formula, newrank):
        if 0 <= newrank:
            self.contract(Not(formula))
            self.expand(formula, newrank)
        else: 
            print(f"Rank {newrank} is negative.\nRevision not done.")
class Construe(object):
    """
    This class implements the **kardioml.segmentation.teijeiro.* algorithm allowing fine-grained
    control of the steps of the algorithm.
    """

    def __init__(self, root_node, K):
        """
        Initializes a new algorithm execution, receiving as arguments the
        root node for the exploration and the K parameter, which determines the
        exploratory nature of the algorithm.

        Instance Properties
        -------------------
        K:
            Exploration parameter.
        successors:
            Dictionary storing the successor generator of each node.
        last_time:
            Interpretation time of the most advanced interpretation generated
            so far.
        open:
            Sorted list of the open nodes, that can still be expanded.
        closed:
            Sorted list of closed nodes.
        best:
            When a node satisfies the *goal()* function, this attribute is
            assigned to that node. While the finished() method returns False,
            this attribute may be refined with new better interpretations.
        """
        assert K > 0
        self.K = K
        self.root = root_node
        self.successors = weakref.WeakKeyDictionary()
        root_succ = PredictableIter(reasoning.firm_succ(root_node))
        if not root_succ.hasnext():
            raise ValueError('The root node does not have valid successors')
        self.successors[root_node] = root_succ
        self.last_time = root_node.time_point
        ocov, scov, nhyp = valuation(root_node)
        heur = Heuristic(ocov, scov, -self.last_time, nhyp)
        self.open = SortedList([Node(heur, root_node)], key=attrgetter('h'))
        self.closed = SortedList(key=attrgetter('h'))
        self.best = None

    def _update_closed(self, newclosed):
        """
        Updates the *closed* list after an iteration of the algorithm. All
        closed interpretations but the best one are removed from this list.
        """
        if not newclosed:
            return
        tmplst = SortedList(key=attrgetter('h'))
        for lst in (newclosed, self.closed):
            for (ocov, scov, ntime, nhyp), n in lst:
                if -ntime < self.last_time:
                    ocov, scov, nhyp = valuation(n, self.last_time)
                tmplst.add(Node(Heuristic(ocov, scov, ntime, nhyp), n))
        self.closed.clear()
        # SortedList.append() is sortedcontainers 1.x API; since the list was
        # just cleared, .add() is the 2.x equivalent here.
        self.closed.append(tmplst.pop(0))

    def step(self, filt=lambda _: True):
        """
        Performs a new step of the algorithm, by continuing the K-best nodes
        satisfying the *filt* function one step.

        Parameters
        ----------
        filt:
            Boolean function that receives an element of the open list and
            decides if the node can be expanded in this iteration. The first
            K nodes satisfying this filter are expanded.
        """
        newopen = []
        newclosed = []
        ancestors = set()
        optimal = False

        for _ in range(self.K):
            node = next((n for n in self.open if filt(n) and not (optimal and n.node in ancestors)), None)
            # The search stops if no nodes can be expanded or if, being in an
            # optimal context, we need to expand a non-optimal node.
            if node is None or (optimal and node.h.ocov > 0.0):
                break
            self.open.remove(node)
            # Go a step further
            nxt = self.successors[node.node].next()
            self.successors[nxt] = PredictableIter(reasoning.firm_succ(nxt))
            nxtime = nxt.time_point
            if nxtime > self.last_time:
                self.last_time = nxtime
            ocov, scov, nhyp = valuation(nxt, nxtime)
            nxt = Node(Heuristic(ocov, scov, -nxtime, nhyp), nxt)
            # Optimality is determined by the coverage of the successors.
            optimal = optimal or ocov == 0.0
            # Reorganize the open and closed list.
            for n in (node, nxt):
                if self.successors[n.node].hasnext():
                    newopen.append(n)
                    reasoning.save_hierarchy(n.node, ancestors)
                else:
                    newclosed.append(n)
                    if (
                        n is nxt
                        and n.h.ocov == 0.0
                        and goal(n.node)
                        and (self.best is None or n.h < self.best.h)
                    ):
                        self.best = n
        for node in newopen:
            self.open.add(node)
        # The closed list is recalculated by keeping only the best one.
        self._update_closed(newclosed)
        if not self.open:
            if not self.closed:
                raise ValueError('Could not find a complete interpretation.')
            self.best = min(self.closed)

    def prune(self):
        """
        Perform a pruning operation by limiting the size of the *open* list
        only to the K best.
        """
        # Now we get the best nodes with a common valuation.
        newopened = SortedList(key=attrgetter('h'))
        for h, node in self.open:
            ocov, scov, nhyp = valuation(node, self.last_time)
            newopened.add(Node(Heuristic(ocov, scov, h.time, nhyp), node))
        self.open = newopened
        n = min(len(self.open), self.K)
        if not reasoning.SAVE_TREE:
            # We track all interesting nodes in the hierarchy.
            saved = set()
            stop = set()
            for i in range(n):
                node = self.open[i].node
                reasoning.save_hierarchy(node, saved)
                stop.add(node)
                mrg = reasoning._MERGED.get(node)
                if mrg is not None:
                    reasoning.save_hierarchy(mrg, saved)
                    stop.add(mrg)
            for _, node in self.closed:
                reasoning.save_hierarchy(node, saved)
            if self.best is not None:
                reasoning.save_hierarchy(self.best.node, saved)
            # And we prune all nodes outside the saved hierarchy
            stack = [self.root]
            while stack:
                node = stack.pop()
                if node not in saved:
                    node.discard('Sacrificed interpretation')
                elif node not in stop:
                    stack.extend(node.child)
        del self.open[n:]
        # We also clear the reasoning cache, since some interpretations cannot
        # be eligible for merging anymore.
        if self.open:
            earliestime = min(n.past_metrics.time for _, n in self.open)
            reasoning.clear_cache(earliestime)

    def finished(self):
        """
        Checks if the searching procedure is finished, that is, more
        iterations, even if possible, will probably not lead to better
        interpretations that the best one. This is considered true if in the
        open list there are no partial covers with less hypotheses than
        the current best interpretation and that are not ancestors of the
        current best interpretation.
        """
        return self.best is not None and all(
            self.best.node.is_ancestor(n.node)
            for n in self.open
            if n.h.ocov == 0.0 and n.h.nhyp < self.best.h.nhyp
        )
Example #15
class LSMTree(WriteOptimizedDS):
    class LevelMetadata():
        def __init__(self):
            self.uuids = list()
            self.first_indices = SortedList()

        def __len__(self):
            return len(self.uuids)

        def insert(self, uuid, first_index):
            idx = self.first_indices.bisect_left(first_index)
            self.first_indices.add(first_index)
            self.uuids.insert(idx, uuid)

        def clear(self, i=None, j=None):
            if i is None and j is None:
                self.uuids.clear()
                self.first_indices.clear()
            else:
                del self.uuids[i:j]
                del self.first_indices[i:j]

        def get_uuid(self, item):
            idx = self.first_indices.bisect_right(item) - 1
            if idx < 0:
                return None
            return self.uuids[idx]

    def __init__(self, disk_filepath, growth_factor=10, enable_bloomfilter=True, bloomfilter_params={'initial_capacity': 3000, 'error_rate': 0.001}, block_size=4096, n_blocks=64, n_input_data=1000):
        super().__init__(disk_filepath, block_size, n_blocks, n_input_data)
        self.growth_factor = growth_factor
        self.memtable = SortedList()
        self.enable_bloomfilter = enable_bloomfilter
        self.bloomfilter_params = bloomfilter_params
        if not os.path.exists(self.disk_filepath):
            os.makedirs(disk_filepath)
        if self.enable_bloomfilter:
            self.bloomfilters = {}
            self.bloomfilters[0] = ScalableBloomFilter(
                **self.bloomfilter_params)

    def insert(self, item):
        if item in self.memtable:
            return
        self.memtable.add(item)
        if self.enable_bloomfilter:
            self.bloomfilters[0].add(item)
        if len(self.memtable) >= self.block_size // 2:
            memtable_copy = list(self.memtable)
            self.dump_to_disk(memtable_copy)
            self.memtable.clear()

    def query(self, item):
        if item not in self.memtable:
            return self.query_from_disk(item)
        else:
            return True

    def dump_to_disk(self, memtable):
        uuid = str(uuid4())
        metadata = LSMTree.LevelMetadata()
        metadata.insert(uuid, memtable[0])
        self.set_level_metadata(0, metadata)
        self.set_level_data(0, uuid, memtable)
        self.compact()

    def get_level_folder(self, level):
        folder = os.path.join(self.disk_filepath, str(level))
        if not os.path.exists(folder):
            os.makedirs(folder)
        return folder

    def is_level_empty(self, level):
        folder = os.path.join(self.disk_filepath, str(level))
        if not os.path.exists(folder):
            os.makedirs(folder)
            return True
        if not os.path.exists(os.path.join(folder, 'metadata')):
            return True
        with open(os.path.join(folder, 'metadata'), 'rb') as f:
            level_meta = pickle.load(f)
        return len(level_meta) == 0

    def get_level_metadata(self, level):
        with open(os.path.join(self.get_level_folder(level), 'metadata'), 'rb') as f:
            meta = pickle.load(f)
        return meta

    def set_level_metadata(self, level, metadata):
        with open(os.path.join(self.get_level_folder(level), 'metadata'), 'wb') as f:
            pickle.dump(metadata, f)

    def get_level_data(self, level, uuid):
        with open(os.path.join(self.get_level_folder(level), uuid), 'rb') as f:
            data = pickle.load(f)
        return data

    def set_level_data(self, level, uuid, data):
        with open(os.path.join(self.get_level_folder(level), uuid), 'wb') as f:
            pickle.dump(data, f)

    def del_level_data(self, level, uuid):
        os.remove(os.path.join(self.get_level_folder(level), uuid))

    def clear_level(self, level):
        folder = self.get_level_folder(level)
        shutil.rmtree(folder)
        self.get_level_folder(level)
        self.set_level_metadata(level, LSMTree.LevelMetadata())

    def compact(self, level=0):
        curr_level_folder = self.get_level_folder(level)
        next_level_folder = os.path.join(self.disk_filepath, str(level + 1))
        curr_level_meta = self.get_level_metadata(level)
        # Current level is not full
        if len(curr_level_meta) < self.growth_factor**level:
            return
        # Current level is full and next level is empty
        if self.is_level_empty(level + 1):
            shutil.rmtree(next_level_folder)
            shutil.copytree(curr_level_folder, next_level_folder)
            shutil.rmtree(curr_level_folder)
            curr_level_folder = self.get_level_folder(level)
            curr_level_meta.clear()
            self.set_level_metadata(level, curr_level_meta)
            if self.enable_bloomfilter:
                self.bloomfilters[level + 1] = self.bloomfilters[level]
                self.bloomfilters[level] = ScalableBloomFilter(
                    **self.bloomfilter_params)
            return
        # Current level is full and next level is not empty
        next_level_meta = self.get_level_metadata(level + 1)
        # Get all data in the current level
        curr_data_list = [self.get_level_data(
            level, uuid) for uuid in curr_level_meta.uuids]
        curr_data = [val for sublist in curr_data_list for val in sublist]
        # Find the indices of the overlapping data in the next level
        next_start_idx = max(next_level_meta.first_indices.bisect_left(curr_data[0]) - 1, 0)
        next_end_idx = next_level_meta.first_indices.bisect_right(curr_data[-1])
        # Get the data in the next level that overlaps this level
        next_data_list = [self.get_level_data(
            level + 1, uuid) for uuid in next_level_meta.uuids[next_start_idx: next_end_idx]]
        next_data = [val for sublist in next_data_list for val in sublist]
        # Delete the data of the next level that was retrieved in the folder and in the metadata
        [self.del_level_data(level + 1, uuid)
            for uuid in next_level_meta.uuids[next_start_idx: next_end_idx]]
        next_level_meta.clear(next_start_idx, next_end_idx)
        # Merge the data in this level and the overlapping data in the next level
        all_sorted_data = []
        i = j = 0
        while i < len(curr_data) and j < len(next_data):
            if curr_data[i] < next_data[j]:
                all_sorted_data.append(curr_data[i])
                i += 1
            else:
                all_sorted_data.append(next_data[j])
                j += 1
        while i < len(curr_data):
            all_sorted_data.append(curr_data[i])
            i += 1
        while j < len(next_data):
            all_sorted_data.append(next_data[j])
            j += 1
        # Break up sorted data into individual files
        for i in range(len(all_sorted_data) // (self.block_size // 2) + 1):
            new_data = all_sorted_data[i*(self.block_size // 2):(i+1)*(self.block_size // 2)]
            if len(new_data) == 0:
                continue
            uuid = str(uuid4())
            first_idx = new_data[0]
            self.set_level_data(level + 1, uuid, new_data)
            next_level_meta.insert(uuid, first_idx)
        # Write next level metadata
        self.set_level_metadata(level + 1, next_level_meta)
        # Clear this level
        self.clear_level(level)
        # Merge bloom filters
        if self.enable_bloomfilter:
            self.bloomfilters[level + 1] = self.bloomfilters[level +
                                                             1].union(self.bloomfilters[level])
            self.bloomfilters[level] = ScalableBloomFilter(
                **self.bloomfilter_params)
        # Compact the next level
        self.compact(level + 1)

    def query_from_disk(self, item):
        total_levels = len(os.listdir(self.disk_filepath))
        for level in range(total_levels):
            if self.enable_bloomfilter and item not in self.bloomfilters[level]:
                continue
            metadata = self.get_level_metadata(level)
            uuid = metadata.get_uuid(item)
            if uuid is None:
                continue
            data = self.get_level_data(level, uuid)
            if item in data:
                return True
        return False
Example #16
class AVLTree:
    def __init__(self, capacity):
        self.__container = SortedList()
        self.__capacity = capacity
        self.__max_mem_use = 0

    def max_memory_usage(self):
        return self.__max_mem_use

    def size(self):
        return len(self.__container)

    def is_full(self):
        return self.size() == self.__capacity

    def is_empty(self):
        return self.size() == 0

    def capacity(self):
        return self.__capacity

    def __len__(self):
        return self.size()

    def __contains__(self, item):
        return item in self.__container

    def push(self, item):
        if self.is_full():
            raise MemoryError("No enough space")
        assert not item.is_in_memory
        self.__container.add(item)
        item.is_in_memory = True
        self.__max_mem_use = max(self.__max_mem_use, self.size())

    def min(self):
        return self.__container[0]

    def remove(self, item):
        self.__container.remove(item)
        assert item.is_in_memory
        item.is_in_memory = False

    def pop_min(self):
        item = self.__container.pop(0)
        assert item.is_in_memory
        item.is_in_memory = False
        return item

    def pop_max(self):
        item = self.__container.pop(-1)
        item.is_in_memory = False
        return item

    def pop_max_leaf(self):
        results = []
        while True:
            item = self.pop_max()
            if item.is_leaf():
                node = item
                break
            else:
                results.append(item)
        for item in results:
            self.push(item)
        return node

    def clear(self):
        self.__container.clear()
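Items stored in this wrapper must be mutually orderable and carry an is_in_memory flag plus an is_leaf() predicate; the minimal Item below is hypothetical, just to exercise pop_max_leaf():

class Item:
    def __init__(self, key, leaf=True):
        self.key = key
        self.is_in_memory = False
        self._leaf = leaf

    def __lt__(self, other):  # ordering used by the SortedList
        return self.key < other.key

    def is_leaf(self):
        return self._leaf

tree = AVLTree(capacity=4)
for it in (Item(2), Item(5, leaf=False), Item(3)):
    tree.push(it)
leaf = tree.pop_max_leaf()    # pops 5 (not a leaf), then 3; pushes 5 back
print(leaf.key, tree.size())  # 3 2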
Example #17
class BinBufferedObservationStorage(ObservationStorage):
    def __init__(
        self,
        camera: CameraModel,
        confidence_threshold: float,
        n_bins_horizontal: int,
        bin_buffer_length: int,
        forget_min_observations: Optional[int] = None,
        forget_min_time: Optional[float] = None,
    ):
        self.camera = camera
        self.confidence_threshold = confidence_threshold
        self.bin_buffer_length = bin_buffer_length
        self.forget_min_observations = forget_min_observations
        self.forget_min_time = forget_min_time
        self.pixels_per_bin = self.camera.resolution[0] / n_bins_horizontal
        self.w = n_bins_horizontal
        self.h = int(round(self.camera.resolution[1] / self.pixels_per_bin))

        self._by_time = SortedList(key=lambda obs: obs.timestamp)
        self._by_bin = dict()

    def add(self, observation: Observation):
        if observation.invalid:
            return
        if observation.confidence < self.confidence_threshold:
            return

        idx = self._get_bin(observation)
        if idx < 0 or idx >= self.w * self.h:
            print(f"INDEX OUT OF BOUNDS: {idx}")
            return

        if idx not in self._by_bin:
            self._by_bin[idx] = SortedList(key=lambda obs: obs.timestamp)

        # add to both lookup structures
        _bin: SortedList = self._by_bin[idx]
        _bin.add(observation)
        self._by_time.add(observation)

        # manage within-bin forgetting
        while len(_bin) > self.bin_buffer_length:
            old = _bin.pop(0)
            self._by_time.remove(old)

        # manage across-bin forgetting
        if self.forget_min_observations is None or self.forget_min_time is None:
            return

        while self.count() > self.forget_min_observations:
            oldest_age = observation.timestamp - self._by_time[0].timestamp
            if oldest_age < self.forget_min_time:
                break

            # forget oldest entry
            old = self._by_time.pop(0)
            idx = self._get_bin(old)
            _bin = self._by_bin[idx]
            _bin.remove(old)
            # make sure to remove bin if empty for bin-counting to work
            if len(_bin) == 0:
                self._by_bin.pop(idx)

    @property
    def observations(self) -> Sequence[Observation]:
        return list(self._by_time)

    def clear(self):
        self._by_time.clear()
        self._by_bin.clear()

    def count(self) -> int:
        return len(self._by_time)

    def get_bin_counts(self) -> np.ndarray:
        dense_1d = np.zeros((self.w * self.h, ))
        for idx, _bin in self._by_bin.items():
            dense_1d[idx] = len(_bin)
        return np.reshape(dense_1d, (self.w, self.h))

    def _get_bin(self, observation: Observation) -> int:
        x, y = (floor((ellipse_center + resolution / 2) / self.pixels_per_bin)
                for ellipse_center, resolution in zip(
                    observation.ellipse.center, self.camera.resolution))
        # convert to a 1-D bin index; note that get_bin_counts() reshapes the
        # dense array to (w, h), so this flattening convention must match it
        return x + y * self.h
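CameraModel and Observation come from the surrounding codebase; the SimpleNamespace stand-ins below are assumptions, used only to exercise the binning (resolution 192x192 with 2 horizontal bins yields a 2x2 grid):

from types import SimpleNamespace

def make_obs(ts, center, confidence=1.0):
    # minimal stand-in for the real Observation type
    return SimpleNamespace(timestamp=ts, confidence=confidence,
                           invalid=False,
                           ellipse=SimpleNamespace(center=center))

storage = BinBufferedObservationStorage(
    camera=SimpleNamespace(resolution=(192, 192)),
    confidence_threshold=0.5,
    n_bins_horizontal=2,
    bin_buffer_length=10,
)
storage.add(make_obs(0.0, (-48.0, -48.0)))  # bin (0, 0)
storage.add(make_obs(1.0, (48.0, 48.0)))    # bin (1, 1)
print(storage.count())  # 2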
Example #18
class OrderBook:
    """An immutable order book.

    TODO: Add BookSide type and make this a real book.

    Attributes:
        exchange_pair (ExchangePair): exchange pair
        bids (SortedList<BookLevel>): Bids, sorted and aggregated.
        asks (SortedList<BookLevel>): Asks, sorted and aggregated.

    """
    def __init__(self, exchange_pair, bids=[], asks=[]):
        self.__exchange_pair = exchange_pair
        self.__bids = SortedList(bids, key=lambda x: -x.price)
        self.__asks = SortedList(asks, key=lambda x: x.price)

    # You could probably implement __eq__, but when would you need it?
    def __eq__(self, other):
        return (isinstance(other, OrderBook)
                and self.__exchange_pair == other.__exchange_pair
                and self.__bids == other.__bids
                and self.__asks == other.__asks)

    @property
    def exchange_pair(self):
        return self.__exchange_pair

    @property
    def exchange_id(self):
        return self.exchange_pair.exchange_id

    @property
    def pair(self):
        return self.exchange_pair.pair

    @property
    def base(self):
        return self.exchange_pair.base

    @property
    def quote(self):
        return self.exchange_pair.quote

    def clear(self):
        self.__bids.clear()
        self.__asks.clear()

    def update(self, side, level):
        """if size is 0, then remove this level"""
        side = self.__bids if side == Side.BUY else self.__asks
        i = side.bisect_left(level)
        if i < len(side) and side[i].price == level.price:
            if level.size == 0:
                side.pop(i)
            else:
                side[i].size = level.size
        else:
            side.add(level)

    @property
    def bids(self):
        return self.__bids

    @property
    def asks(self):
        return self.__asks

    def __repr__(self):
        return f'OrderBook("{self.exchange_pair}", bids={[(x.price, x.size) for x in self.bids]}, asks={[(x.price, x.size) for x in self.asks]})'
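Side, BookLevel and the exchange-pair value come from the surrounding project; with hypothetical stand-ins, update()'s size-0-removes-the-level behavior looks like this (the mutable defaults bids=[]/asks=[] are harmless here because SortedList copies its input):

from enum import Enum

class Side(Enum):
    BUY = 1
    SELL = 2

class BookLevel:
    def __init__(self, price, size):
        self.price = price
        self.size = size

book = OrderBook('kraken:BTC/USD')            # stand-in exchange pair
book.update(Side.BUY, BookLevel(100.0, 2.0))  # insert a bid level
book.update(Side.BUY, BookLevel(100.0, 3.0))  # resize the same level
book.update(Side.BUY, BookLevel(100.0, 0.0))  # size 0 removes it
print(len(book.bids))  # 0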
Example #19
class DispatcherDistributed(DispatcherBase):
    """The central dispatcher for a distributed
    branch-and-bound algorithm.

    Parameters
    ----------
    comm : ``mpi4py.MPI.Comm``, optional
        The MPI communicator to use. If set to None, this
        will disable the use of MPI and avoid an attempted
        import of `mpi4py.MPI` (which avoids triggering a
        call to `MPI_Init()`).
    """

    def __init__(self, comm):
        assert comm.size > 1
        import mpi4py.MPI

        assert mpi4py.MPI.Is_initialized()
        super(DispatcherDistributed, self).__init__()
        self.clock = mpi4py.MPI.Wtime
        self.comm = comm
        # send rank of dispatcher to all workers
        self.dispatcher_rank = DispatcherProxy._init(self.comm, ProcessType.dispatcher)
        assert self.dispatcher_rank == self.comm.rank
        self.worker_ranks = [i for i in range(self.comm.size) if i != self.comm.rank]
        self.needs_work_queue = collections.deque([], len(self.worker_ranks))
        self._solve_info_by_source = {i: _SolveInfo() for i in self.worker_ranks}
        self.last_known_bound = dict()
        self.external_bounds = SortedList()
        self.has_work = set()
        self._send_requests = None
        self.explored_nodes_count = 0

    def _compute_load_imbalance(self):
        pmin = inf
        pmax = -inf
        psum = 0
        for solve_info in self._solve_info_by_source.values():
            count = solve_info.explored_nodes_count
            if count < pmin:
                pmin = count
            if count > pmax:
                pmax = count
            psum += count
        if psum > 0:
            pavg = psum / float(len(self._solve_info_by_source))
            return (pmax - pmin) / pavg * 100.0
        else:
            return 0.0

    def _get_current_bound(self):
        """Get the current global bound"""
        bound = self.queue.bound()
        if self.converger.sense == maximize:
            if len(self.external_bounds) and (
                (bound is None) or (self.external_bounds[-1] > bound)
            ):
                bound = self.external_bounds[-1]
            if (self.worst_terminal_bound is not None) and (
                (bound is None) or (self.worst_terminal_bound > bound)
            ):
                bound = self.worst_terminal_bound
        else:
            if len(self.external_bounds) and (
                (bound is None) or (self.external_bounds[0] < bound)
            ):
                bound = self.external_bounds[0]
            if (self.worst_terminal_bound is not None) and (
                (bound is None) or (self.worst_terminal_bound < bound)
            ):
                bound = self.worst_terminal_bound
        if bound is not None:
            return bound
        else:
            # likely means the dispatcher was initialized
            # with an empty queue
            return self.converger.unbounded_objective

    def _get_final_solve_info(self):
        solve_info = _SolveInfo()
        for worker_solve_info in self._solve_info_by_source.values():
            solve_info.add_from(worker_solve_info)
        return solve_info

    def _get_node_counts(self):
        return (
            self.served_nodes_count,
            self.explored_nodes_count,
            self.queue.size() + len(self.has_work),
        )

    #
    # Overloaded base class methods
    #

    def _check_update_best_objective(self, objective):
        updated = super(DispatcherDistributed, self)._check_update_best_objective(
            objective
        )
        if updated:
            assert self.best_objective == objective
            self_external_bounds = self.external_bounds
            eligible_for_queue = self.converger.eligible_for_queue
            # trim the sorted external_bounds list
            N = len(self_external_bounds)
            if self.converger.sense == maximize:
                i = 0
                for i in range(N):
                    if eligible_for_queue(self_external_bounds[i], objective):
                        break
                if i != 0:
                    self.external_bounds = SortedList(self_external_bounds.islice(i, N))
            else:
                i = N - 1
                for i in range(N - 1, -1, -1):
                    if eligible_for_queue(self_external_bounds[i], objective):
                        break
                if i != N - 1:
                    self.external_bounds = SortedList(
                        self_external_bounds.islice(0, i + 1)
                    )

    def _get_work_to_send(self, dest):
        node = self._get_work_item()
        bound = node.bound
        self.last_known_bound[dest] = bound
        self.external_bounds.add(bound)
        self.has_work.add(dest)
        return node

    def _send_work(self):
        stop = False
        data = None
        if len(self.needs_work_queue) > 0:
            if self._send_requests is None:
                self._send_requests = {i: None for i in self.worker_ranks}
            if self.termination_condition is None:
                while (self.queue.size() > 0) and (len(self.needs_work_queue) > 0):
                    stop = False
                    dest = self.needs_work_queue.popleft()
                    node = self._get_work_to_send(dest)
                    best_node_slots = None
                    if self.best_node is not None:
                        best_node_slots = self.best_node.slots
                    send_ = marshal.dumps(
                        (self.best_objective, best_node_slots, node.slots),
                        config.MARSHAL_PROTOCOL_VERSION,
                    )
                    if self._send_requests[dest] is not None:
                        self._send_requests[dest].Wait()
                    self._send_requests[dest] = self.comm.Isend(
                        [send_, mpi4py.MPI.BYTE], dest, tag=DispatcherResponse.work
                    )
                    # a shortcut to check if we should keep sending nodes
                    if (self.node_limit is not None) and (
                        self.served_nodes_count >= self.node_limit
                    ):
                        break
            if len(self.needs_work_queue) == (self.comm.size - 1):
                if self.termination_condition is None:
                    self.termination_condition = TerminationCondition.queue_empty
                requests = []
                for r_ in self._send_requests.values():
                    if r_ is not None:
                        requests.append(r_)
                mpi4py.MPI.Request.Waitall(requests)
                self._send_requests = None
                stop = True
                data = (
                    self._get_current_bound(),
                    self.termination_condition,
                    self._get_final_solve_info(),
                )
                best_node_slots = None
                if self.best_node is not None:
                    best_node_slots = self.best_node.slots
                send_ = marshal.dumps(
                    (
                        self.best_objective,
                        best_node_slots,
                        data[0],
                        _termination_condition_to_int[data[1]],
                        data[2].data,
                    ),
                    config.MARSHAL_PROTOCOL_VERSION,
                )
                # everyone needs work, so we must be done
                requests = []
                while len(self.needs_work_queue) > 0:
                    dest = self.needs_work_queue.popleft()
                    requests.append(
                        self.comm.Isend(
                            [send_, mpi4py.MPI.BYTE], dest, DispatcherResponse.nowork
                        )
                    )
                mpi4py.MPI.Request.Waitall(requests)

        return (stop, self.best_objective, self.best_node, data)

    def _update_solve_info(self, solve_info_data, source):
        self.explored_nodes_count -= self._solve_info_by_source[
            source
        ].explored_nodes_count
        self._solve_info_by_source[source].data[:] = solve_info_data
        self.explored_nodes_count += self._solve_info_by_source[
            source
        ].explored_nodes_count

    #
    # Interface
    #

    def initialize(
        self,
        best_objective,
        best_node,
        initialize_queue,
        queue_strategy,
        converger,
        node_limit,
        time_limit,
        queue_limit,
        track_bound,
        log,
        log_interval_seconds,
        log_new_incumbent,
    ):
        """Initialize the dispatcher. See the
        :func:`pybnb.dispatcher.DispatcherBase.initialize`
        method for argument descriptions."""
        self.needs_work_queue.clear()
        for solve_info in self._solve_info_by_source.values():
            solve_info.reset()
        self.last_known_bound.clear()
        self.external_bounds.clear()
        self.has_work.clear()
        self._send_requests = None
        self.explored_nodes_count = 0
        if best_node is not None:
            best_node = _SerializedNode.from_node(best_node)
        initialize_queue_ = DispatcherQueueData(
            nodes=[
                _SerializedNode.from_node(node)
                if (type(node) is not _SerializedNode)
                else node
                for node in initialize_queue.nodes
            ],
            worst_terminal_bound=initialize_queue.worst_terminal_bound,
            sense=initialize_queue.sense,
        )
        super(DispatcherDistributed, self).initialize(
            best_objective,
            best_node,
            initialize_queue_,
            queue_strategy,
            converger,
            node_limit,
            time_limit,
            queue_limit,
            track_bound,
            log,
            log_interval_seconds,
            log_new_incumbent,
        )
        if self.journalist is not None:
            self.log_info(
                "Starting branch & bound solve:\n"
                " - dispatcher pid: %s (%s)\n"
                " - worker processes: %d"
                % (os.getpid(), socket.gethostname(), len(self.worker_ranks))
            )
            self.journalist.tic()

    def update(
        self, best_objective, best_node, terminal_bound, solve_info, node_list, source
    ):
        """Update local worker information.

        Parameters
        ----------
        best_objective : float or None
            A new potential best objective found by the
            worker.
        best_node : :class:`Node <pybnb.node.Node>` or None
            A new potential best node found by the worker.
        terminal_bound : float or None
            The worst bound of any terminal nodes that were
            processed by the worker since the last update.
        solve_info : :class:`_SolveInfo`
            The most up-to-date worker solve information.
        node_list : list
            A list of nodes to add to the queue.
        source : int
            The worker process rank that the update came from.

        Returns
        -------
        solve_finished : bool
            Indicates if the dispatcher has terminated the solve.
        new_objective : float
            The best objective value known to the dispatcher.
        best_node : :class:`Node <pybnb.node.Node>` or None
            The best node known to the dispatcher.
        data : ``array.array`` or None
            If solve_finished is false, a data array
            representing a new node for the worker to
            process. Otherwise, a tuple containing the
            global bound, the termination condition string,
            and the number of explored nodes.
        """
        assert self.initialized
        self._update_solve_info(solve_info.data, source)
        self.needs_work_queue.append(source)
        self.has_work.discard(source)
        if source in self.last_known_bound:
            val_ = self.last_known_bound[source]
            try:
                self.external_bounds.remove(val_)
            except ValueError:
                # rare, but can happen when
                # _check_update_best_node modifies
                # the external_bounds list
                pass
        if best_objective is not None:
            self._check_update_best_objective(best_objective)
        if best_node is not None:
            assert best_node._uuid is not None
            self._check_update_best_node(best_node)
        if len(node_list) > 0:
            for node in node_list:
                self._add_work_to_queue(node)
        if terminal_bound is not None:
            self._check_update_worst_terminal_bound(terminal_bound)
        last_global_bound = self.last_global_bound
        self._check_termination()
        ret = self._send_work()
        stop = ret[0]
        if not stop:
            if self.journalist is not None:
                force = (last_global_bound == self.converger.unbounded_objective) and (
                    last_global_bound != self.last_global_bound
                )
                self.journalist.tic(force=force)
        else:
            if self.journalist is not None:
                self.journalist.tic(force=True)
                self.journalist.log_info(self.journalist._lines)
            assert self.initialized
            self.initialized = False
        return ret

    #
    # Distributed Interface
    #

    def serve(self):
        """Start listening for distributed branch-and-bound
        commands and map them to commands in the local
        dispatcher interface."""

        def rebuild_update_requests(size):
            update_requests = {}
            update_data = bytearray(size)
            for i in self.worker_ranks:
                update_requests[i] = self.comm.Recv_init(
                    update_data, source=i, tag=DispatcherAction.update
                )
            return update_requests, update_data

        update_requests = None
        solve_info_ = _SolveInfo()
        data = None
        msg = Message(self.comm)
        while 1:
            msg.probe()
            tag = msg.tag
            source = msg.source
            if tag == DispatcherAction.update:
                size = msg.status.Get_count(datatype=mpi4py.MPI.BYTE)
                if (data is None) or (len(data) < size):
                    update_requests, data = rebuild_update_requests(size)
                req = update_requests[msg.status.Get_source()]
                req.Start()
                req.Wait()
                if six.PY2:
                    data_ = str(data)
                else:
                    data_ = data
                (
                    best_objective,
                    best_node,
                    terminal_bound,
                    solve_info_data,
                    node_list,
                ) = marshal.loads(data_)
                solve_info_.data = array.array("d", solve_info_data)
                if best_node is not None:
                    best_node = _SerializedNode(best_node)
                node_list = [_SerializedNode(state) for state in node_list]
                ret = self.update(
                    best_objective,
                    best_node,
                    terminal_bound,
                    solve_info_,
                    node_list,
                    source,
                )
                stop = ret[0]
                if stop:
                    best_node = ret[2]
                    if best_node is not None:
                        best_node = _SerializedNode.restore_node(best_node.slots)
                    return (
                        ret[1],  # best_objective
                        best_node,
                        ret[3][0],  # global_bound
                        ret[3][1],  # termination_condition
                        ret[3][2],
                    )  # global_solve_info
            elif tag == DispatcherAction.log_info:
                msg.recv(mpi4py.MPI.CHAR)
                self.log_info(msg.data)
            elif tag == DispatcherAction.log_warning:
                msg.recv(mpi4py.MPI.CHAR)
                self.log_warning(msg.data)
            elif tag == DispatcherAction.log_debug:
                msg.recv(mpi4py.MPI.CHAR)
                self.log_debug(msg.data)
            elif tag == DispatcherAction.log_error:
                msg.recv(mpi4py.MPI.CHAR)
                self.log_error(msg.data)
            elif tag == DispatcherAction.log_critical:
                msg.recv(mpi4py.MPI.CHAR)
                self.log_critical(msg.data)
            elif tag == DispatcherAction.stop_listen:
                msg.recv()
                assert msg.data is None
                return (None, None, None, None, None)
            else:  # pragma:nocover
                raise RuntimeError(
                    "Dispatcher received invalid "
                    "message tag '%s' from rank '%s'" % (tag, source)
                )

    def save_dispatcher_queue(self):
        """Saves the current dispatcher queue. The result can
        be used to re-initialize a solve.

        Returns
        -------
        queue_data : :class:`pybnb.dispatcher.DispatcherQueueData`
            An object storing information that can be used
            to re-initialize the dispatcher queue to its
            current state.
        """
        nodes = []
        for node in self.queue.items():
            node_ = node.restore_node(node.slots)
            nodes.append(node_)
        return DispatcherQueueData(
            nodes=nodes,
            worst_terminal_bound=self.worst_terminal_bound,
            sense=self.converger.sense,
        )
Example #20
                            clist = [ivalue]
                            cstart = ipos
                            cend = ipos
                            cPass = not options.debug  #and (windowHigh-windowLow >= variCutoff)
                    # Increase position in window towards end of the list
                    posInWindow += 1
                evaluateValues(chrom, cstart, cend, clist, cPass)
            # Clean-up for next region
            chrom = nchrom
            pos = npos
            step = nstep
            cstart, cend = None, None
            cPass = not options.debug
            clist = []
            windowValues = []
            windowValuesSorted.clear()
            posInWindow = 0
    else:
        if len(line.strip()) == 0: continue
        pos += step
        value = int(line)

        # Window buffer has not reached its size yet
        if len(windowValues) < windowSize:
            windowValues.append(value)
            windowValuesSorted.add(value)
        # Window buffer has reached its size
        elif len(windowValues) == windowSize:
            # Buffer filled but current position is not yet centered in the window,
            # work with current buffer state
            if (posInWindow < halfWindow):
Example #21
class SCEngine:
    '''
    Fast tree-based implementation for indexing, using the
    ``sortedcontainers`` package.

    Parameters
    ----------
    data : Table
        Sorted columns of the original table
    row_index : Column object
        Row numbers corresponding to data columns
    unique : bool (defaults to False)
        Whether the values of the index must be unique
    '''
    def __init__(self, data, row_index, unique=False):
        node_keys = map(tuple, data)
        self._nodes = SortedList(starmap(Node, zip(node_keys, row_index)))
        self._unique = unique

    def add(self, key, value):
        '''
        Add a key, value pair.
        '''
        if self._unique and (key in self._nodes):
            message = 'duplicate {0!r} in unique index'.format(key)
            raise ValueError(message)
        self._nodes.add(Node(key, value))

    def find(self, key):
        '''
        Find rows corresponding to the given key.
        '''
        return [node.value for node in self._nodes.irange(key, key)]

    def remove(self, key, data=None):
        '''
        Remove data from the given key.
        '''
        if data is not None:
            item = Node(key, data)
            try:
                self._nodes.remove(item)
            except ValueError:
                return False
            return True
        items = list(self._nodes.irange(key, key))
        for item in items:
            self._nodes.remove(item)
        return bool(items)

    def shift_left(self, row):
        '''
        Decrement rows larger than the given row.
        '''
        for node in self._nodes:
            if node.value > row:
                node.value -= 1

    def shift_right(self, row):
        '''
        Increment rows greater than or equal to the given row.
        '''
        for node in self._nodes:
            if node.value >= row:
                node.value += 1

    def items(self):
        '''
        Return a list of key, data tuples.
        '''
        result = OrderedDict()
        for node in self._nodes:
            if node.key in result:
                result[node.key].append(node.value)
            else:
                result[node.key] = [node.value]
        return result.items()

    def sort(self):
        '''
        Make row order align with key order.
        '''
        for index, node in enumerate(self._nodes):
            node.value = index

    def sorted_data(self):
        '''
        Return a list of rows in order sorted by key.
        '''
        return [node.value for node in self._nodes]

    def range(self, lower, upper, bounds=(True, True)):
        '''
        Return row values in the given range.
        '''
        iterator = self._nodes.irange(lower, upper, bounds)
        return [node.value for node in iterator]

    def replace_rows(self, row_map):
        '''
        Replace rows with the values in row_map.
        '''
        nodes = [node for node in self._nodes if node.value in row_map]
        for node in nodes:
            node.value = row_map[node.value]
        self._nodes.clear()
        self._nodes.update(nodes)

    def __repr__(self):
        return '{0!r}'.format(list(self._nodes))
Example #22
class SCEngine:
    '''
    Fast tree-based implementation for indexing, using the
    ``sortedcontainers`` package.

    Parameters
    ----------
    data : Table
        Sorted columns of the original table
    row_index : Column object
        Row numbers corresponding to data columns
    unique : bool (defaults to False)
        Whether the values of the index must be unique
    '''
    def __init__(self, data, row_index, unique=False):
        node_keys = map(tuple, data)
        self._nodes = SortedList(starmap(Node, zip(node_keys, row_index)))
        self._unique = unique

    def add(self, key, value):
        '''
        Add a key, value pair.
        '''
        if self._unique and (key in self._nodes):
            message = f'duplicate {key!r} in unique index'
            raise ValueError(message)
        self._nodes.add(Node(key, value))

    def find(self, key):
        '''
        Find rows corresponding to the given key.
        '''
        return [node.value for node in self._nodes.irange(key, key)]

    def remove(self, key, data=None):
        '''
        Remove data from the given key.
        '''
        if data is not None:
            item = Node(key, data)
            try:
                self._nodes.remove(item)
            except ValueError:
                return False
            return True
        items = list(self._nodes.irange(key, key))
        for item in items:
            self._nodes.remove(item)
        return bool(items)

    def shift_left(self, row):
        '''
        Decrement rows larger than the given row.
        '''
        for node in self._nodes:
            if node.value > row:
                node.value -= 1

    def shift_right(self, row):
        '''
        Increment rows greater than or equal to the given row.
        '''
        for node in self._nodes:
            if node.value >= row:
                node.value += 1

    def items(self):
        '''
        Return a list of key, data tuples.
        '''
        result = OrderedDict()
        for node in self._nodes:
            if node.key in result:
                result[node.key].append(node.value)
            else:
                result[node.key] = [node.value]
        return result.items()

    def sort(self):
        '''
        Make row order align with key order.
        '''
        for index, node in enumerate(self._nodes):
            node.value = index

    def sorted_data(self):
        '''
        Return a list of rows in order sorted by key.
        '''
        return [node.value for node in self._nodes]

    def range(self, lower, upper, bounds=(True, True)):
        '''
        Return row values in the given range.
        '''
        iterator = self._nodes.irange(lower, upper, bounds)
        return [node.value for node in iterator]

    def replace_rows(self, row_map):
        '''
        Replace rows with the values in row_map.
        '''
        nodes = [node for node in self._nodes if node.value in row_map]
        for node in nodes:
            node.value = row_map[node.value]
        self._nodes.clear()
        self._nodes.update(nodes)

    def __repr__(self):
        if len(self._nodes) > 6:
            nodes = list(self._nodes[:3]) + ['...'] + list(self._nodes[-3:])
        else:
            nodes = self._nodes
        nodes_str = ', '.join(str(node) for node in nodes)
        return f'<{self.__class__.__name__} nodes={nodes_str}>'
Example #23
class _DHTNode(object):
    def __init__(self, n=8, ff=0.80, direction=LEFT_TO_RIGHT, parent=None):
        """
        Defines an instance of a new Dynamic Hash Table.
        :param n: The max number of entries per bucket.
        :param ff: The fill factor for each bucket.
        :param direction: The direction to consume the key from.
        :param parent: A reference to the parent node.
        """
        if n < 3:
            n = 3
        if ff < 0.25:
            ff = 0.25
        if direction not in (LEFT_TO_RIGHT, RIGHT_TO_LEFT):
            direction = LEFT_TO_RIGHT
        self.n = n
        self.ff = ff
        self.direction = direction
        self.parent = parent
        self.left = SortedList(key=extract_key)
        self.right = SortedList(key=extract_key)

    def add(self, key, value, bkey):
        """
        Adds a key-value pair to the DHT.
        :param key: The key to add.
        :param value: The corresponding value.
        :param bkey: A binary representation of the key.
        """
        bit, ck = consume_bkey(bkey, self.direction)
        if bit == LEFT_BIT:
            if isinstance(self.left, SortedList):
                self.left.add(_IndexEntry(key, value, ck))
                if len(self.left) > self.n * self.ff:
                    self._overflow(LEFT_BIT)
            elif isinstance(self.left, _DHTNode):
                self.left.add(key, value, ck)
            else:
                raise Exception()
        elif bit == RIGHT_BIT:
            if isinstance(self.right, SortedList):
                self.right.add(_IndexEntry(key, value, ck))
                if len(self.right) > self.n * self.ff:
                    self._overflow(RIGHT_BIT)
            elif isinstance(self.right, _DHTNode):
                self.right.add(key, value, ck)
            else:
                raise Exception()
        else:
            raise Exception()

    def contains(self, key, bkey):
        """
        Determines if the DHT contains at least one key-value entry with the given key.
        :param key: The key to lookup.
        :param bkey: The binary representation of the key.
        :return: True if at least one key-value entry is found corresponding to the given key, False otherwise.
        """
        bit, ck = consume_bkey(bkey, self.direction)
        if bit == LEFT_BIT:
            if isinstance(self.left, SortedList):
                for entry in self.left:
                    if key == entry.key:
                        return True
                return False
            elif isinstance(self.left, _DHTNode):
                return self.left.contains(key, ck)
            else:
                raise Exception()
        elif bit == RIGHT_BIT:
            if isinstance(self.right, SortedList):
                for entry in self.right:
                    if key == entry.key:
                        return True
                return False
            elif isinstance(self.right, _DHTNode):
                return self.right.contains(key, ck)
            else:
                raise Exception()
        else:
            raise Exception()

    def delete(self, key, bkey):
        """
        Deletes the first matching key-value entry from the DHT.
        :param key: The key to lookup.
        :param bkey: The binary representation of the key.
        """
        bit, ck = consume_bkey(bkey, self.direction)
        if bit == LEFT_BIT:
            if isinstance(self.left, SortedList):
                discard = None
                for entry in self.left:
                    if key == entry.key:
                        discard = entry
                        break
                if discard:
                    self.left.discard(discard)
            elif isinstance(self.left, _DHTNode):
                self.left.delete(key, ck)
            else:
                raise Exception()
        elif bit == RIGHT_BIT:
            if isinstance(self.right, SortedList):
                discard = None
                for entry in self.right:
                    if key == entry.key:
                        discard = entry
                        break
                if discard:
                    self.right.discard(discard)
            elif isinstance(self.right, _DHTNode):
                self.right.delete(key, ck)
            else:
                raise Exception()
        else:
            raise Exception()
        if (isinstance(self.left, SortedList) and not self.left
                and isinstance(self.right, SortedList) and not self.right):
            self._underflow()

    def get(self, key, bkey):
        """
        Gets the first matching key-value entry for the given key.
        :param key: The key to lookup.
        :param bkey: The binary representation of the key.
        :return: The value of the first matching entry, or None if no entry matches.
        """
        bit, ck = consume_bkey(bkey, self.direction)
        if bit == LEFT_BIT:
            if isinstance(self.left, SortedList):
                for entry in self.left:
                    if key == entry.key:
                        return entry.value
                return None
            elif isinstance(self.left, _DHTNode):
                return self.left.get(key, ck)
            else:
                raise Exception()
        elif bit == RIGHT_BIT:
            if isinstance(self.right, SortedList):
                for entry in self.right:
                    if key == entry.key:
                        return entry.value
                return None
            elif isinstance(self.right, _DHTNode):
                return self.right.get(key, ck)
            else:
                raise Exception()
        else:
            raise Exception()

    def height(self):
        """
        Gets the height of the DHT.
        :return: The height of the DHT.
        """
        if isinstance(self.left, SortedList) and isinstance(
                self.right, SortedList):
            return 1
        left = 0
        right = 0
        if isinstance(self.left, _DHTNode):
            left = self.left.height() + 1
        if isinstance(self.right, _DHTNode):
            right = self.right.height() + 1
        return max(left, right)

    def traverse(self):
        """
        Traverses the DHT yielding key-value pairs as a Python generator.
        :return: A Python generator over the key-value pairs in the DHT.
        """
        if isinstance(self.left, SortedList):
            for entry in self.left:
                yield entry.key, entry.value
        elif isinstance(self.left, _DHTNode):
            yield from self.left.traverse()
        else:
            raise Exception()
        if isinstance(self.right, SortedList):
            for entry in self.right:
                yield entry.key, entry.value
        elif isinstance(self.right, _DHTNode):
            yield from self.right.traverse()
        else:
            raise Exception()

    def _overflow(self, bit):
        """
        Redistributes the indicated bucket's keys to a new left and right bucket. An overflow happens when a bucket
        grows too large, i.e. its total length is greater than its maximum number of entries times its fill factor.
        :param bit: The bit representing the bucket that overflowed. One of {LEFT_BIT, RIGHT_BIT}.
        """
        if bit == LEFT_BIT and isinstance(self.left, SortedList):
            new_left = _DHTNode(n=self.n,
                                ff=self.ff,
                                direction=self.direction,
                                parent=self)
            for entry in self.left:
                new_left.add(entry.key, entry.value, entry.bkey)
            self.left.clear()
            self.left = new_left
        elif bit == RIGHT_BIT and isinstance(self.right, SortedList):
            new_right = _DHTNode(n=self.n,
                                 ff=self.ff,
                                 direction=self.direction,
                                 parent=self)
            for entry in self.right:
                new_right.add(entry.key, entry.value, entry.bkey)
            self.right.clear()
            self.right = new_right
        else:
            raise Exception()

    def _underflow(self):
        """
        Coalesces the two buckets pointed to by this node into a single bucket. An underflow occurs when a deletion
        leaves both buckets pointed to by this node empty. This rule is ignored for the root node.
        """
        # only underflow if we are not the root node, root node cannot underflow
        if self.parent:
            if self.parent.left == self:
                self.parent.left = SortedList(key=extract_key)
            elif self.parent.right == self:
                self.parent.right = SortedList(key=extract_key)
            else:
                raise Exception()
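The node relies on several module-level helpers and constants that are not shown here (LEFT_TO_RIGHT, RIGHT_TO_LEFT, LEFT_BIT, RIGHT_BIT, consume_bkey, extract_key, _IndexEntry). A plausible minimal sketch of those contracts, for illustration only:

# Hypothetical sketch; the real definitions live elsewhere in the module.
LEFT_TO_RIGHT, RIGHT_TO_LEFT = 0, 1
LEFT_BIT, RIGHT_BIT = '0', '1'

def consume_bkey(bkey, direction):
    """Strip one bit off the binary key; return (bit, remaining_key)."""
    if direction == LEFT_TO_RIGHT:
        return bkey[0], bkey[1:]
    return bkey[-1], bkey[:-1]

class _IndexEntry:
    def __init__(self, key, value, bkey):
        self.key, self.value, self.bkey = key, value, bkey

def extract_key(entry):
    """Sort key used by the SortedList buckets."""
    return entry.key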
Example #24
class Context:

    def __init__(
            self,
            run_date: datetime
    ):

        from collections import defaultdict
        from sortedcontainers import SortedList
        from db.mongo import connector

        self.run_date = run_date
        self.member: Member
        self.enrollments = SortedList()  # Enrollment objects, kept in sorted order
        self.visits = SortedList()  # Visit objects, kept in sorted order
        self.pharm = None
        self.mmdf: list = []  # MMDF objects
        self.age_eligibility = False
        self.ce_eligibility = False
        self.enrolled_in_snp = False
        self.required_exclusion = False
        self.long_term_institution = False
        self.gaps_in_care = False
        self.frailty = False
        self.advanced_illness = False
        self.anchor_date_eligibility = False
        self.on_dementia_meds = False
        self.bilateral_mastectomy = False
        self.optional_exclusions = False
        self.overlapping_enrollments = defaultdict(list)
        self.db_conn = connector.Connector()

    def __repr__(self):
        return 'Run date {}'.format(self.run_date)

    def reset(self) -> None:
        self.enrollments.clear()
        self.overlapping_enrollments.clear()
        self.visits.clear()
        self.mmdf.clear()
        self.age_eligibility = False
        self.ce_eligibility = False
        self.enrolled_in_snp = False
        self.required_exclusion = False
        self.long_term_institution = False
        self.gaps_in_care = False
        self.frailty = False
        self.advanced_illness = False
        self.pharm = None
        self.on_dementia_meds = False
        self.bilateral_mastectomy = False
        self.optional_exclusions = False

    def add_enrollment(self, enrollment: dict) -> None:
        from pandas import date_range
        from model.overlapping_enrollments import OverlappingEnrollments

        start_date = enrollment['StartDate']
        if start_date == 'NaT':
            return

        finish_date = enrollment['FinishDate']
        enrolled_date_range = date_range(start_date, finish_date)
        payer = enrollment['Payer']

        # enumerate keeps idx in sync even when an iteration is skipped
        for idx, mem_enrollment in enumerate(self.enrollments):
            overlapping_dates: DatetimeIndex = mem_enrollment.dates.intersection(enrolled_date_range)
            if not overlapping_dates.empty:
                overlap_enrollments = OverlappingEnrollments([payer, mem_enrollment.payer], overlapping_dates)

                # do not store duplicates
                if self.overlapping_enrollments[idx] and \
                        self.overlapping_enrollments[idx][-1] == overlap_enrollments:
                    continue

                self.overlapping_enrollments[idx].append(overlap_enrollments)

        self.enrollments.add(Enrollment(enrolled_date_range, payer))

    def add_encounter(self, encounter: dict) -> None:
        service_date = datetime.fromisoformat(encounter['ServiceDate'])
        agg_codes = encounter['AggregatedCodes']
        self.visits.add(Visit(service_date, agg_codes, encounter['CptMod1']))

    def add_pharm(self, pharm: dict) -> None:
        service_date = datetime.fromisoformat(pharm['ServiceDate'])
        dispensed_med_code = pharm['NDCDrugCode']
        self.pharm = Pharm(service_date, dispensed_med_code, self.db_conn)

    def add_mmdf(self, mmdf: dict) -> None:
        run_date = datetime.fromisoformat(mmdf['Rundate'])
        lti_flag = mmdf['LongTermInstitutionalStatus']
        self.mmdf.append(MMDF(run_date, lti_flag))
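The overlap detection in add_enrollment reduces to a pandas DatetimeIndex intersection; a standalone sketch of that building block:

from pandas import date_range

a = date_range('2021-01-01', '2021-01-10')
b = date_range('2021-01-08', '2021-01-15')
overlap = a.intersection(b)
print(overlap.empty)            # False: the date ranges overlap
print(overlap[0], overlap[-1])  # 2021-01-08 00:00:00 2021-01-10 00:00:00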
Example #25
class Book:
    """
    Represent bid / ask book.
    * Book only allows one user order at a time
    * User order will not affect book statistics like quote and volume
    """

    def __init__(self, side: str, key_func: Optional[Callable[[int], int]]) -> None:
        self.side = side
        self.key_func = key_func if key_func else lambda x: x

        # We need this because price levels follow price priority, not time priority (which a dict alone can provide)
        self.prices = SortedList(key=key_func)  # Sorted prices
        self.price_levels: Dict[int, PriceLevel] = {}  # Price to level map
        self.order_pool: Dict[int, PriceLevel] = {}  # Order ID to level map
        # Store order price and PriceLevel. We do not need ID since there is only one order
        self.user_order_info: Optional[Tuple[int, PriceLevel]] = None

        self._front_idx: Optional[int] = None

    def reset(self):
        self.prices.clear()
        self.price_levels.clear()
        self.order_pool.clear()
        self.user_order_info = None
        self._front_idx = None

    # ========== Order Operations ==========
    def add_limit_order(self, order: LimitOrder) -> None:
        """ Add limit order to the correct price level """
        if order.id in self.order_pool:
            raise RuntimeError(f'LimitOrder {order.id} already exists')

        self.order_pool[order.id] = self._get_price_level(order.price, force_index=True).add_limit_order(order)

    def match_limit_order(self, market_order: MarketOrder) -> Tuple[bool, Optional[Execution]]:
        """ Match environment order against limit order. Remove empty price level where needed """
        # Sometimes environment orders may not follow time priority. We should follow the referenced order ID in this case
        user_order = None
        target_price_level = self.order_pool[market_order.id]

        # User orders may create price levels that do not exist in the real market. Need to match against those first
        if target_price_level.price != self.prices[0]:
            top_level = self.price_levels[self.prices[0]]
            if top_level.shares > 0:
                # Shares > 0 means that real LimitOrders exist in the top level
                raise RuntimeError('Market order being matched against levels not in the front')
            user_order = top_level.pop_user_order()
            self._remove_price_level_if_empty(top_level)

        # Now get the user orders that are in front of the matched real LimitOrder
        price_level, exhausted, executed_order = target_price_level.match_limit_order(market_order)
        self._remove_price_level_if_empty(price_level)

        # It can be that both orders are None
        if executed_order is not None:
            user_order = executed_order

        # Whether the matching limit order is already exhausted
        if exhausted:
            del self.order_pool[market_order.id]

        # Update user order pool and return executions
        return exhausted, self._handle_matched_user_limit_order(user_order) if user_order else None

    def cancel_order(self, order: CancelOrder) -> None:
        """ Cancel (partial) shares of a LimitOrder """
        self.order_pool[order.id].cancel_order(order)

    def delete_order(self, order: DeleteOrder):
        """ Delete the whole LimitOrder """
        price_level = self.order_pool[order.id].delete_order(order)
        del self.order_pool[order.id]
        self._remove_price_level_if_empty(price_level)

    # ========== User Order Operation ==========
    def add_user_limit_order(self, order: UserLimitOrder) -> None:
        """
        Add user limit order to the correct price level
        * Remove the old user order if exists
        * We do not want to deal with time priority because
            * This simplifies the flow
            * Last action's effect will spill over to the current one
        """
        if self.user_order_info:
            original_id, price_level = self.user_order_info
            price_level.pop_user_order()  # Only one user order is allowed
            self._remove_price_level_if_empty(price_level)

        self.user_order_info = order.price, self._get_price_level(order.price).add_user_limit_order(order)

    def match_limit_order_for_user(self, order: UserMarketOrder) -> Execution:
        """ Match LimitOrder for UserMarketOrder """
        if self.user_order_info:
            raise RuntimeError('Cannot execute MarketOrder on the side that also has user LimitOrder')

        total_value = 0
        shares = 0

        # Recall that we are not actually matching the LimitOrders. No need to remove the executed LimitOrder.
        for price in self.prices:
            executed = order.shares - self.price_levels[price].match_limit_order_for_user(order)
            total_value += price * executed
            shares += executed
            if order.shares == 0:
                break

        if order.shares > 0:
            raise RuntimeError('User market order cannot be fully executed')

        return Execution(order.id, int(total_value / shares), shares if order.side == 'B' else -shares)

    def delete_user_order(self):
        """ Remove user order """
        if self.user_order_info:
            _, price_level = self.user_order_info
            price_level.pop_user_order()
            self._remove_price_level_if_empty(price_level)
            self.user_order_info = None

    def resolve_book_crossing_on_user_order(self, price: int) -> Optional[Execution]:
        """
        User orders may be placed inside the real market, in which case the newly added real order may cross with the
            user orders. When this happens, we assume that the user orders are executed
        """
        signed_price = self.key_func(price)

        quote = self.quote
        if quote and self.key_func(quote) <= signed_price:
            raise RuntimeError('Real order crosses real order')

        if self.user_order_info and self.key_func(self.user_order_info[0]) <= signed_price:
            price_level = self.price_levels[self.user_order_info[0]]
            execution = self._handle_matched_user_limit_order(price_level.pop_user_order())
            # Must be empty
            self._remove_price_level_if_empty(price_level)
            return execution
        return None

    # ========== Private Methods ==========
    def _get_price_level(self, price: int, force_index=False) -> PriceLevel:
        """ Return price level indicated by price. Price level will be added if not already exists """
        level = self.price_levels.get(price, None)

        # shares == 0 means that the PriceLevel was previously occupied by user order only
        if level is None:
            self.prices.add(price)
            level = PriceLevel(price)
            self.price_levels[price] = level
            # force_index is used when we are adding a new price level for real order. Order is not added at this point
            #   and shares will be 0. Therefore, we need to force it
            # On the other hand, we still need to run update_front_index for user order because it may change the
            #   ordering
            self._update_front_index(force_index, price)

        elif level.shares == 0:
            self._update_front_index(force_index, price)

        return level

    def _remove_price_level_if_empty(self, price_level: PriceLevel):
        """ Remove PriceLevel if empty """
        if price_level.empty:
            del self.price_levels[price_level.price]
            # "remove" will raise ValueError if not exists
            self.prices.remove(price_level.price)

        if price_level.shares == 0:
            # Separate from the logic above because we can be in the situation where real orders are exhausted
            #   but at least one user order is waiting. In this case, this price level is technically gone
            self._update_front_index()

    def _update_front_index(self, force_index=False, target_price=None) -> None:
        """ Find out the first price level that has real order """
        if not self.prices:
            self._front_idx = None
        else:
            price = self.prices[0]
            if self.price_levels[price].shares > 0 or (force_index and price == target_price):
                self._front_idx = 0
            else:
                self._front_idx = 1 if len(self.prices) > 1 else None

    def _handle_matched_user_limit_order(self, order: UserLimitOrder) -> Execution:
        """ Book-keeping actions for UserLimitOrder execution """
        self.user_order_info = None
        return Execution(order.id, order.price, order.shares if self.side == 'B' else -order.shares)

    # ========== Properties ==========
    # These statistics should not include user orders. Otherwise, we may end up being our own market
    @property
    def quote(self) -> Optional[int]:
        """ Return the front price without user orders """
        if self._front_idx is not None:
            return self.prices[self._front_idx]
        return None

    @property
    def volume(self) -> Optional[int]:
        """ Return the volume at the front without user orders """
        if self._front_idx is not None:
            return self.price_levels[self.quote].shares
        return None

    def get_depth(self, num_levels: int) -> List[Tuple[int, int]]:
        """ Return the top n price levels without user orders """
        if self._front_idx is not None:
            return [(price, self.price_levels[price].shares)
                    for price in self.prices[self._front_idx: self._front_idx + num_levels]]
        return []

    @property
    def empty(self) -> bool:
        if len(self.order_pool) == 0:
            if self.user_order_info is None:
                return len(self.prices) == 0
            return len(self.prices) == 1 and self.price_levels[self.prices[0]].shares == 0
        return False

    @property
    def user_order_price(self) -> Optional[int]:
        if self.user_order_info:
            return self.user_order_info[0]
        return None
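The side-agnostic trick is the key_func: the bid book can pass a negating key so that prices[0] is always the best price on either side. A tiny sketch using only SortedList:

from sortedcontainers import SortedList

bid_prices = SortedList([101, 99, 100], key=lambda p: -p)  # descending
ask_prices = SortedList([102, 104, 103])                   # ascending
print(bid_prices[0], ask_prices[0])  # 101 102 -> best bid / best ask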
Example #26
class DispatcherDistributed(DispatcherBase):
    """The central dispatcher for a distributed
    branch-and-bound algorithm.

    Parameters
    ----------
    comm : ``mpi4py.MPI.Comm``, optional
        The MPI communicator to use. If set to None, this
        will disable the use of MPI and avoid an attempted
        import of `mpi4py.MPI` (which avoids triggering a
        call to `MPI_Init()`).
    """
    def __init__(self, comm):
        assert comm.size > 1
        import mpi4py.MPI
        assert mpi4py.MPI.Is_initialized()
        super(DispatcherDistributed, self).__init__()
        self.clock = mpi4py.MPI.Wtime
        self.comm = comm
        # send rank of dispatcher to all workers
        self.dispatcher_rank = DispatcherProxy._init(self.comm,
                                                     ProcessType.dispatcher)
        assert self.dispatcher_rank == self.comm.rank
        self.worker_ranks = [
            i for i in range(self.comm.size) if i != self.comm.rank
        ]
        self.needs_work_queue = \
            collections.deque([],
                              len(self.worker_ranks))
        self._solve_info_by_source = \
            {i: _SolveInfo() for i in self.worker_ranks}
        self.last_known_bound = dict()
        self.external_bounds = SortedList()
        self.first_update = \
            {_r: True for _r in self.worker_ranks}
        self.has_work = set()
        self._send_requests = None
        self.explored_nodes_count = 0

    def _compute_load_imbalance(self):
        node_counts = numpy.array([
            info.explored_nodes_count
            for info in self._solve_info_by_source.values()
        ],
                                  dtype=int)
        imbalance = 0.0
        if sum(node_counts) > 0:
            pmax = float(node_counts.max())
            pmin = float(node_counts.min())
            pavg = float(numpy.mean(node_counts))
            imbalance = (pmax - pmin) / pavg * 100.0
        return imbalance

    def _get_current_bound(self):
        """Get the current global bound"""
        bound = self.queue.bound()
        if self.converger.sense == maximize:
            if len(self.external_bounds) and \
               ((bound is None) or \
                (self.external_bounds[-1] > bound)):
                bound = self.external_bounds[-1]
            if (self.worst_terminal_bound is not None) and \
               ((bound is None) or \
                (self.worst_terminal_bound > bound)):
                bound = self.worst_terminal_bound
        else:
            if len(self.external_bounds) and \
               ((bound is None) or \
                (self.external_bounds[0] < bound)):
                bound = self.external_bounds[0]
            if (self.worst_terminal_bound is not None) and \
               ((bound is None) or \
                (self.worst_terminal_bound < bound)):
                bound = self.worst_terminal_bound
        return bound

    def _get_final_solve_info(self):
        solve_info = _SolveInfo()
        for worker_solve_info in self._solve_info_by_source.values():
            solve_info.add_from(worker_solve_info)
        return solve_info

    def _get_node_counts(self):
        return (self.served_nodes_count, self.explored_nodes_count,
                self.queue.size() + len(self.has_work))

    #
    # Overloaded base class methods
    #

    def _check_update_best_objective(self, objective):
        updated = super(DispatcherDistributed, self).\
            _check_update_best_objective(objective)
        if updated:
            self_external_bounds = self.external_bounds
            eligible_for_queue = self.converger.eligible_for_queue
            # trim the sorted external_bounds list
            N = len(self_external_bounds)
            if self.converger.sense == maximize:
                i = 0
                for i in range(N):
                    if eligible_for_queue(self_external_bounds[i], objective):
                        break
                if i != 0:
                    self.external_bounds = SortedList(
                        self_external_bounds.islice(i, N))
            else:
                i = N - 1
                for i in range(N - 1, -1, -1):
                    if eligible_for_queue(self_external_bounds[i], objective):
                        break
                if i != N - 1:
                    self.external_bounds = SortedList(
                        self_external_bounds.islice(0, i + 1))

    def _get_work_to_send(self, dest):
        node_data = self._get_work_item()
        bound = Node._extract_bound(node_data)
        self.last_known_bound[dest] = bound
        self.external_bounds.add(bound)
        self.has_work.add(dest)
        return node_data

    def _send_work(self):
        stop = False
        data = None
        if len(self.needs_work_queue) > 0:
            if self._send_requests is None:
                self._send_requests = \
                    {i: None for i in self.worker_ranks}
            if self.termination_condition is None:
                while (self.queue.size() > 0) and \
                      (len(self.needs_work_queue) > 0):
                    stop = False
                    dest = self.needs_work_queue.popleft()
                    node_data = self._get_work_to_send(dest)
                    if self._send_requests[dest] is not None:
                        self._send_requests[dest].Wait()
                    self._send_requests[dest] = \
                        self.comm.Isend([node_data,mpi4py.MPI.DOUBLE],
                                        dest,
                                        tag=DispatcherResponse.work)
                    # a shortcut to check if we should keep sending nodes
                    if (self.node_limit is not None) and \
                       (self.served_nodes_count >= self.node_limit):
                        break
            if len(self.needs_work_queue) == (self.comm.size - 1):
                if self.termination_condition is None:
                    self.termination_condition = \
                        TerminationCondition.no_nodes
                requests = []
                for r_ in self._send_requests.values():
                    if r_ is not None:
                        requests.append(r_)
                mpi4py.MPI.Request.Waitall(requests)
                self._send_requests = None
                stop = True
                data = (self._get_current_bound(), self.termination_condition,
                        self._get_final_solve_info())
                send_ = numpy.empty(3 + _SolveInfo._data_size, dtype=float)
                send_[0] = self.best_objective
                send_[1] = data[0]
                send_[2] = _termination_condition_to_int[data[1]]
                send_[3:] = data[2].data
                # everyone needs work, so we must be done
                requests = []
                while len(self.needs_work_queue) > 0:
                    dest = self.needs_work_queue.popleft()
                    requests.append(
                        self.comm.Isend([send_, mpi4py.MPI.DOUBLE], dest,
                                        DispatcherResponse.nowork))
                mpi4py.MPI.Request.Waitall(requests)

        return (stop, self.best_objective, data)

    def _update_solve_info(self, solve_info_data, source):
        self.explored_nodes_count -= \
            self._solve_info_by_source[source].explored_nodes_count
        self._solve_info_by_source[source].data[:] = solve_info_data
        self.explored_nodes_count += \
            self._solve_info_by_source[source].explored_nodes_count

    #
    # Interface
    #

    def initialize(self, best_objective, initialize_queue, queue_strategy,
                   converger, node_limit, time_limit, log,
                   log_interval_seconds, log_new_incumbent):
        """Initialize the dispatcher. See the
        :func:`pybnb.dispatcher.DispatcherBase.initialize`
        method for argument descriptions."""
        self.needs_work_queue.clear()
        for solve_info in self._solve_info_by_source.values():
            solve_info.reset()
        self.last_known_bound.clear()
        self.external_bounds.clear()
        for _r in self.first_update:
            self.first_update[_r] = True
        self.has_work.clear()
        self._send_requests = None
        self.explored_nodes_count = 0
        super(DispatcherDistributed,
              self).initialize(best_objective, initialize_queue,
                               queue_strategy, converger, node_limit,
                               time_limit, log, log_interval_seconds,
                               log_new_incumbent)
        if self.journalist is not None:
            self.log_info("Starting branch & bound solve:\n"
                          " - dispatcher pid: %s (%s)\n"
                          " - worker processes: %d\n"
                          " - queue strategy: %s" %
                          (os.getpid(), socket.gethostname(),
                           len(self.worker_ranks), queue_strategy))
            self.journalist.tic()

    def update(self, best_objective, previous_bound, solve_info,
               node_data_list, source):
        """Update local worker information.

        Parameters
        ----------
        best_objective : float
            The current best objective value known to the
            worker.
        previous_bound : float
            The updated bound computed for the last node
            that was processed by the worker.
        solve_info : :class:`_SolveInfo`
            The most up-to-date worker solve information.
        node_data_list : list
            A list of node data arrays to add to the queue.
        source : int
            The worker process rank that the update came from.

        Returns
        -------
        solve_finished : bool
            Indicates if the dispatcher has terminated the solve.
        new_objective : float
            The best objective value known to the dispatcher.
        data : ``array.array`` or None
            If solve_finished is false, a data array
            representing a new node for the worker to
            process. Otherwise, a tuple containing the
            global bound, the termination condition string,
            and the number of explored nodes.
        """
        assert self.initialized
        self._update_solve_info(solve_info.data, source)
        self.needs_work_queue.append(source)
        self.has_work.discard(source)
        if source in self.last_known_bound:
            val_ = self.last_known_bound[source]
            try:
                self.external_bounds.remove(val_)
            except ValueError:
                # rare, but can happen when
                # _check_update_best_objective modifies
                # the external_bounds list
                pass
        self._check_update_best_objective(best_objective)
        if len(node_data_list):
            for node_data in node_data_list:
                self._add_work_to_queue(node_data, set_tree_id=True)
        else:
            if not self.first_update[source]:
                self._check_update_worst_terminal_bound(previous_bound)
        self.first_update[source] = False
        last_global_bound = self.last_global_bound
        self._check_convergence()
        ret = self._send_work()
        stop = ret[0]
        if not stop:
            if self.journalist is not None:
                force = (last_global_bound == \
                         self.converger.unbounded_objective) and \
                         (last_global_bound != \
                          self.last_global_bound)
                self.journalist.tic(force=force)
        else:
            if self.journalist is not None:
                self.journalist.tic(force=True)
                self.journalist.log_info(self.journalist._lines)
            assert self.initialized
            self.initialized = False
        return ret

    #
    # Distributed Interface
    #

    def serve(self):
        """Start listening for distributed branch-and-bound
        commands and map them to commands in the local
        dispatcher interface."""
        def rebuild_update_requests(size):
            update_requests = {}
            # Note: The code below relies on the fact that
            #       this is an array.array type and _not_ a
            #       numpy.array type. It assumes a copy of
            #       the data is made when a slice is
            #       created.
            update_data = array.array('d', [0]) * size
            for i in self.worker_ranks:
                update_requests[i] = self.comm.Recv_init(
                    update_data, source=i, tag=DispatcherAction.update)
            return update_requests, update_data

        update_requests = None
        data = None
        solve_info_ = _SolveInfo()
        msg = Message(self.comm)
        while True:
            msg.probe()
            tag = msg.tag
            source = msg.source
            if tag == DispatcherAction.update:
                size = msg.status.Get_count(datatype=mpi4py.MPI.DOUBLE)
                if (data is None) or \
                   (len(data) < size):
                    update_requests, data = \
                        rebuild_update_requests(size)
                req = update_requests[msg.status.Get_source()]
                req.Start()
                req.Wait()
                best_objective = float(data[0])
                previous_bound = float(data[1])
                assert int(data[2]) == data[2]
                nodes_receiving_count = int(data[2])
                solve_info_.data[:] = data[3:(_SolveInfo._data_size + 3)]
                if nodes_receiving_count > 0:
                    pos = 3 + _SolveInfo._data_size
                    node_data_list = []
                    for i in range(nodes_receiving_count):
                        assert int(data[pos]) == data[pos]
                        data_size = int(data[pos])
                        pos += 1
                        node_data_list.append(data[pos:pos + data_size])
                        pos += data_size
                else:
                    node_data_list = ()
                ret = self.update(best_objective, previous_bound, solve_info_,
                                  node_data_list, source)
                stop = ret[0]
                if stop:
                    return (
                        ret[1],  # best_objective
                        ret[2][0],  # global_bound
                        ret[2][1],  # termination_condition
                        ret[2][2])  # global_solve_info
            elif tag == DispatcherAction.log_info:
                msg.recv(mpi4py.MPI.CHAR)
                self.log_info(msg.data)
            elif tag == DispatcherAction.log_warning:
                msg.recv(mpi4py.MPI.CHAR)
                self.log_warning(msg.data)
            elif tag == DispatcherAction.log_debug:
                msg.recv(mpi4py.MPI.CHAR)
                self.log_debug(msg.data)
            elif tag == DispatcherAction.log_error:
                msg.recv(mpi4py.MPI.CHAR)
                self.log_error(msg.data)
            elif tag == DispatcherAction.stop_listen:
                msg.recv()
                assert msg.data is None
                return (None, None, None, None)
            else:  #pragma:nocover
                raise RuntimeError("Dispatcher received invalid "
                                   "message tag '%s' from rank '%s'" %
                                   (tag, source))
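external_bounds holds the bounds of nodes currently checked out to workers; a SortedList makes the add/remove per update cheap and turns the best outstanding bound lookup in _get_current_bound into an index access. Roughly:

from sortedcontainers import SortedList

external_bounds = SortedList()
for bound in (3.5, 7.0, 5.2):    # bounds of nodes handed to workers
    external_bounds.add(bound)
external_bounds.remove(7.0)      # that worker reported back
# maximize sense -> largest outstanding bound; minimize -> smallest
print(external_bounds[-1], external_bounds[0])  # 5.2 3.5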
Example #27
class PriorityDict(MutableMapping):
    """
    A PriorityDict provides the same methods as a dict. Additionally, a
    PriorityDict efficiently maintains its keys in value sorted order.
    Consequently, the keys method will return the keys in value sorted order,
    the popitem method will remove the item with the highest value, etc.
    """
    def __init__(self, *args, **kwargs):
        """
        A PriorityDict provides the same methods as a dict. Additionally, a
        PriorityDict efficiently maintains its keys in value sorted order.
        Consequently, the keys method will return the keys in value sorted
        order, the popitem method will remove the item with the highest value,
        etc.
        If the first argument is the boolean value False, then it indicates
        that keys are not comparable. By default this setting is True and
        duplicate values are tie-broken on the key. Using comparable keys
        improves the performance of the PriorityDict.
        An optional *iterable* argument provides an initial series of items to
        populate the PriorityDict.  Each item in the sequence must itself
        contain two items. The first is used as a key in the new dictionary,
        and the second as the key's value. If a given key is seen more than
        once, the last value associated with it is retained in the new
        dictionary.
        If keyword arguments are given, the keywords themselves with their
        associated values are added as items to the dictionary. If a key is
        specified both in the positional argument and as a keyword argument, the
        value associated with the keyword is retained in the dictionary. For
        example, these all return a dictionary equal to ``{"one": 2, "two":
        3}``:
        * ``PriorityDict(one=2, two=3)``
        * ``PriorityDict({'one': 2, 'two': 3})``
        * ``PriorityDict(zip(('one', 'two'), (2, 3)))``
        * ``PriorityDict([['two', 3], ['one', 2]])``
        The first example only works for keys that are valid Python
        identifiers; the others work with any valid keys.
        Note that this constructor mimics the Python dict constructor. If
        you're looking for a constructor like collections.Counter(...), see
        PriorityDict.count(...).
        """
        self._dict = dict()

        if len(args) > 0 and isinstance(args[0], bool):
            if args[0]:
                self._list = SortedList()
            else:
                self._list = SortedListWithKey(key=lambda tup: tup[0])
            # consume the flag so it is not passed on to update()
            args = args[1:]
        else:
            self._list = SortedList()

        self.iloc = _IlocWrapper(self)
        self.update(*args, **kwargs)

    def clear(self):
        """Remove all elements from the dictionary."""
        self._dict.clear()
        self._list.clear()

    def clean(self, value=0):
        """
        Remove all items with value less than or equal to `value`.
        Default `value` is 0.
        """
        _list, _dict = self._list, self._dict
        pos = self.bisect_right(value)
        for key in (key for value, key in _list[:pos]):
            del _dict[key]
        del _list[:pos]

    def __contains__(self, key):
        """Return True if and only if *key* is in the dictionary."""
        return key in self._dict

    def __delitem__(self, key):
        """
        Remove ``d[key]`` from *d*.  Raises a KeyError if *key* is not in the
        dictionary.
        """
        value = self._dict[key]
        self._list.remove((value, key))
        del self._dict[key]

    def __getitem__(self, key):
        """
        Return the priority of *key* in *d*.  Raises a KeyError if *key* is not
        in the dictionary.
        """
        return self._dict[key]

    def __iter__(self):
        """
        Create an iterator over the keys of the dictionary ordered by the value
        sort order.
        """
        return iter(key for value, key in self._list)

    def __reversed__(self):
        """
        Create an iterator over the keys of the dictionary ordered by the
        reversed value sort order.
        """
        return iter(key for value, key in reversed(self._list))

    def __len__(self):
        """Return the number of (key, value) pairs in the dictionary."""
        return len(self._dict)

    def __setitem__(self, key, value):
        """Set `d[key]` to *value*."""
        if key in self._dict:
            old_value = self._dict[key]
            self._list.remove((old_value, key))
        self._list.add((value, key))
        self._dict[key] = value

    def copy(self):
        """Create a shallow copy of the dictionary."""
        result = PriorityDict()
        result._dict = self._dict.copy()
        result._list = self._list.copy()
        result.iloc = _IlocWrapper(result)
        return result

    def __copy__(self):
        """Create a shallow copy of the dictionary."""
        return self.copy()

    @classmethod
    def fromkeys(cls, iterable, value=0):
        """
        Create a new dictionary with keys from `iterable` and values set to
        `value`. The default *value* is 0.
        """
        return PriorityDict((key, value) for key in iterable)

    def get(self, key, default=None):
        """
        Return the value for *key* if *key* is in the dictionary, else
        *default*.  If *default* is not given, it defaults to ``None``,
        so that this method never raises a KeyError.
        """
        return self._dict.get(key, default)

    def has_key(self, key):
        """Return True if and only in *key* is in the dictionary."""
        return key in self._dict

    def pop(self, key, default=_NotGiven):
        """
        If *key* is in the dictionary, remove it and return its value,
        else return *default*. If *default* is not given and *key* is not in
        the dictionary, a KeyError is raised.
        """
        if key in self._dict:
            value = self._dict[key]
            self._list.remove((value, key))
            return self._dict.pop(key)
        else:
            if default is _NotGiven:
                raise KeyError
            else:
                return default

    def popitem(self, index=-1):
        """
        Remove and return item at *index* (default: -1). Raises IndexError if
        dict is empty or index is out of range. Negative indices are supported
        as for slice indices.
        """
        value, key = self._list.pop(index)
        del self._dict[key]
        return key, value

    def setdefault(self, key, default=0):
        """
        If *key* is in the dictionary, return its value.  If not, insert *key*
        with a value of *default* and return *default*.  *default* defaults to
        ``0``.
        """
        if key in self._dict:
            return self._dict[key]
        else:
            self._dict[key] = default
            self._list.add((default, key))
            return default

    def elements(self):
        """
        Return an iterator over elements repeating each as many times as its
        count. Elements are returned in value sort-order. If an element’s count
        is less than one, elements() will ignore it.
        """
        values = (repeat(key, value) for value, key in self._list)
        return chain.from_iterable(values)

    def most_common(self, count=None):
        """
        Return a list of the `count` highest priority elements with their
        priority. If `count` is not specified, `most_common` returns *all*
        elements in the dict. Elements with equal counts are ordered by key.
        """
        _list, _dict = self._list, self._dict

        if count is None:
            return [(key, value) for value, key in reversed(_list)]

        end = len(_dict)
        start = max(0, end - count)

        return [(key, value) for value, key in reversed(_list[start:end])]

    def subtract(self, elements):
        """
        Elements are subtracted from an iterable or from another mapping (or
        counter). Like dict.update() but subtracts counts instead of replacing
        them. Both inputs and outputs may be zero or negative.
        """
        self -= Counter(elements)

    def tally(self, *args, **kwargs):
        """
        Elements are counted from an iterable or added-in from another mapping
        (or counter). Like dict.update() but adds counts instead of replacing
        them. Also, the iterable is expected to be a sequence of elements, not a
        sequence of (key, value) pairs.
        """
        self += Counter(*args, **kwargs)

    @classmethod
    def count(cls, *args, **kwargs):
        """
        Consume `args` and `kwargs` with a Counter and use that mapping to
        initialize a PriorityDict.
        """
        return PriorityDict(Counter(*args, **kwargs))

    def update(self, *args, **kwargs):
        """
        Update the dictionary with the key/value pairs from *other*, overwriting
        existing keys.
        *update* accepts either another dictionary object or an iterable of
        key/value pairs (as a tuple or other iterable of length two).  If
        keyword arguments are specified, the dictionary is then updated with
        those key/value pairs: ``d.update(red=1, blue=2)``.
        """
        _list, _dict = self._list, self._dict

        if len(args) == 1 and len(kwargs) == 0 and isinstance(args[0], Mapping):
            items = args[0]
        else:
            items = dict(*args, **kwargs)

        if (10 * len(items)) > len(_dict):
            _dict.update(items)
            _list.clear()
            _list.update((value, key) for key, value in iteritems(_dict))
        else:
            for key, value in iteritems(items):
                if key in _dict:
                    old_value = _dict[key]
                    _list.remove((old_value, key))
                _dict[key] = value
                _list.add((value, key))

    def index(self, key):
        """
        Return the smallest *i* such that `d.iloc[i] == key`.  Raises KeyError
        if *key* is not present.
        """
        value = self._dict[key]
        return self._list.index((value, key))

    def bisect_left(self, value):
        """
        Similar to the ``bisect`` module in the standard library, this returns
        an appropriate index to insert *value* in PriorityDict. If *value* is
        already present in PriorityDict, the insertion point will be before (to
        the left of) any existing entries.
        """
        return self._list.bisect_left((value,))

    def bisect(self, value):
        """Same as bisect_left."""
        return self._list.bisect((value,))

    def bisect_right(self, value):
        """
        Same as `bisect_left`, but if *value* is already present in
        PriorityDict, the insertion point will be after (to the right
        of) any existing entries.
        """
        return self._list.bisect_right((value, _Biggest))

    def __iadd__(self, that):
        """Add values from `that` mapping."""
        _list, _dict = self._list, self._dict
        if len(_dict) == 0:
            _dict.update(that)
            _list.update((value, key) for key, value in iteritems(_dict))
        elif len(that) * 3 > len(_dict):
            _list.clear()
            for key, value in iteritems(that):
                if key in _dict:
                    _dict[key] += value
                else:
                    _dict[key] = value
            _list.update((value, key) for key, value in iteritems(_dict))
        else:
            for key, value in iteritems(that):
                if key in _dict:
                    old_value = _dict[key]
                    _list.remove((old_value, key))
                    value = old_value + value
                _dict[key] = value
                _list.add((value, key))
        return self

    def __isub__(self, that):
        """Subtract values from `that` mapping."""
        _list, _dict = self._list, self._dict
        if len(_dict) == 0:
            _dict.clear()
            _list.clear()
        elif len(that) * 3 > len(_dict):
            _list.clear()
            for key, value in iteritems(that):
                if key in _dict:
                    _dict[key] -= value
            _list.update((value, key) for key, value in iteritems(_dict))
        else:
            for key, value in iteritems(that):
                if key in _dict:
                    old_value = _dict[key]
                    _list.remove((old_value, key))
                    value = old_value - value
                    _dict[key] = value
                    _list.add((value, key))
        return self

    def __ior__(self, that):
        """Or values from `that` mapping (max(v1, v2))."""
        _list, _dict = self._list, self._dict
        if len(_dict) == 0:
            _dict.update(that)
            _list.update((value, key) for key, value in iteritems(_dict))
        elif len(that) * 3 > len(_dict):
            _list.clear()
            for key, value in iteritems(that):
                if key in _dict:
                    old_value = _dict[key]
                    _dict[key] = old_value if old_value > value else value
                else:
                    _dict[key] = value
            _list.update((value, key) for key, value in iteritems(_dict))
        else:
            for key, value in iteritems(that):
                if key in _dict:
                    old_value = _dict[key]
                    _list.remove((old_value, key))
                    value = old_value if old_value > value else value
                _dict[key] = value
                _list.add((value, key))
        return self

    def __iand__(self, that):
        """And values from `that` mapping (min(v1, v2))."""
        _list, _dict = self._list, self._dict
        if len(_dict) == 0:
            _dict.clear()
            _list.clear()
        elif len(that) * 3 > len(_dict):
            _list.clear()
            for key, value in iteritems(that):
                if key in _dict:
                    old_value = _dict[key]
                    _dict[key] = old_value if old_value < value else value
            _list.update((value, key) for key, value in iteritems(_dict))
        else:
            for key, value in iteritems(that):
                if key in _dict:
                    old_value = _dict[key]
                    _list.remove((old_value, key))
                    value = old_value if old_value < value else value
                    _dict[key] = value
                    _list.add((value, key))
        return self

    def __add__(self, that):
        """Add values from this and `that` mapping."""
        result = PriorityDict()
        _list, _dict = result._list, result._dict
        _dict.update(self._dict)
        for key, value in iteritems(that):
            if key in _dict:
                _dict[key] += value
            else:
                _dict[key] = value
        _list.update((value, key) for key, value in iteritems(_dict))
        return result

    def __sub__(self, that):
        """Subtract values in `that` mapping from this."""
        result = PriorityDict()
        _list, _dict = result._list, result._dict
        _dict.update(self._dict)
        for key, value in iteritems(that):
            if key in _dict:
                _dict[key] -= value
        _list.update((value, key) for key, value in iteritems(_dict))
        return result

    def __or__(self, that):
        """Or values from this and `that` mapping."""
        result = PriorityDict()
        _list, _dict = result._list, result._dict
        _dict.update(self._dict)
        for key, value in iteritems(that):
            if key in _dict:
                old_value = _dict[key]
                _dict[key] = old_value if old_value > value else value
            else:
                _dict[key] = value
        _list.update((value, key) for key, value in iteritems(_dict))
        return result

    def __and__(self, that):
        """And values from this and `that` mapping."""
        result = PriorityDict()
        _list, _dict = result._list, result._dict
        _dict.update(self._dict)
        for key, value in iteritems(that):
            if key in _dict:
                old_value = _dict[key]
                _dict[key] = old_value if old_value < value else value
        _list.update((value, key) for key, value in iteritems(_dict))
        return result

    def __eq__(self, that):
        """Compare two mappings for equality."""
        if isinstance(that, PriorityDict):
            that = that._dict
        return self._dict == that

    def __ne__(self, that):
        """Compare two mappings for inequality."""
        if isinstance(that, PriorityDict):
            that = that._dict
        return self._dict != that

    def __lt__(self, that):
        """Compare two mappings for less than."""
        if isinstance(that, PriorityDict):
            that = that._dict
        _dict = self._dict
        return (_dict != that and self <= that)

    def __le__(self, that):
        """Compare two mappings for less than equal."""
        if isinstance(that, PriorityDict):
            that = that._dict
        _dict = self._dict
        return (len(_dict) <= len(that) and
                all(_dict[key] <= that[key] if key in that else False
                    for key in _dict))

    def __gt__(self, that):
        """Compare two mappings for greater than."""
        if isinstance(that, PriorityDict):
            that = that._dict
        _dict = self._dict
        return (_dict != that and self >= that)

    def __ge__(self, that):
        """Compare two mappings for greater than equal."""
        if isinstance(that, PriorityDict):
            that = that._dict
        _dict = self._dict
        return (len(_dict) >= len(that) and
                all(_dict[key] >= that[key] if key in _dict else False
                    for key in that))

    def isdisjoint(self, that):
        """
        Return True if no key in `self` is also in `that`.
        This doesn't check that the value is greater than zero.
        To remove keys with value less than or equal to zero see *clean*.
        """
        return not any(key in self for key in that)

    def items(self):
        """
        Return a list of the dictionary's items (``(key, value)``
        pairs). Items are ordered by their value from least to greatest.
        """
        return list((key, value) for value, key in self._list)

    def iteritems(self):
        """
        Return an iterable over the items (``(key, value)`` pairs) of the
        dictionary. Items are ordered by their value from least to greatest.
        """
        return iter((key, value) for value, key in self._list)

    @not26
    def viewitems(self):
        """
        In Python 2.7 and later, return a new `ItemsView` of the dictionary's
        items. Beware iterating the `ItemsView` as items are unordered.
        In Python 2.6, raise a NotImplementedError.
        """
        if hexversion < 0x03000000:
            return self._dict.viewitems()
        else:
            return self._dict.items()

    def keys(self):
        """
        Return a list of the dictionary's keys. Keys are ordered
        by their corresponding value from least to greatest.
        """
        return list(key for value, key in self._list)

    def iterkeys(self):
        """
        Return an iterable over the keys of the dictionary. Keys are ordered
        by their corresponding value from least to greatest.
        """
        return iter(key for value, key in self._list)

    @not26
    def viewkeys(self):
        """
        In Python 2.7 and later, return a new `KeysView` of the dictionary's
        keys. Beware iterating the `KeysView` as keys are unordered.
        In Python 2.6, raise a NotImplementedError.
        """
        if hexversion < 0x03000000:
            return self._dict.viewkeys()
        else:
            return self._dict.keys()

    def values(self):
        """
        Return a list of the dictionary's values. Values are
        ordered from least to greatest.
        """
        return list(value for value, key in self._list)

    def itervalues(self):
        """
        Return an iterable over the values of the dictionary. Values are
        iterated from least to greatest.
        """
        return iter(value for value, key in self._list)

    @not26
    def viewvalues(self):
        """
        In Python 2.7 and later, return a `ValuesView` of the dictionary's
        values. Beware iterating the `ValuesView` as values are unordered.
        In Python 2.6, raise a NotImplementedError.
        """
        if hexversion < 0x03000000:
            return self._dict.viewvalues()
        else:
            return self._dict.values()

    def __repr__(self):
        """Return a string representation of PriorityDict."""
        return 'PriorityDict({0})'.format(repr(dict(self)))

    def _check(self):
        self._list._check()
        assert len(self._dict) == len(self._list)
        assert all(key in self._dict and self._dict[key] == value
                   for value, key in self._list)
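
# A minimal, self-contained sketch (illustrative, not the class's public API)
# of the invariant _check() asserts above: a plain dict gives key -> value
# lookups, mirrored by a SortedList of (value, key) pairs so keys can be read
# back in value order. Every mutating operator above updates both structures.
from sortedcontainers import SortedList

_dict = {'a': 3, 'b': 1, 'c': 2}
_list = SortedList((value, key) for key, value in _dict.items())

# An in-place update must touch both sides, as __ior__ does for shared keys:
old_value = _dict['b']
_list.remove((old_value, 'b'))
_dict['b'] = old_value if old_value > 5 else 5  # "or" keeps the larger value
_list.add((_dict['b'], 'b'))

print([key for value, key in _list])  # keys ordered by value: ['c', 'a', 'b']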
Example #28
0
             clist = [ivalue]
             cstart = ipos
             cend = ipos
             cPass = not options.debug #and (windowHigh-windowLow >= variCutoff)
         # Increase position in window towards end of the list
         posInWindow += 1
       evaluateValues(chrom,cstart,cend,clist,cPass)
     # Clean-up for next region
     chrom = nchrom
     pos = npos
     step = nstep
     cstart,cend = None,None
     cPass = not options.debug
     clist = []
     windowValues = []
     windowValuesSorted.clear()
     posInWindow = 0
 else:
   if len(line.strip()) == 0: continue 
   pos += step
   value = int(line)
   
   # Window buffer has not reached its size yet
   if len(windowValues) < windowSize:
     windowValues.append(value)
     windowValuesSorted.add(value)
   # Window buffer has reached its size
   elif len(windowValues) == windowSize:
     # Buffer filled but current position is not yet centered in the window,
     # work with current buffer state
     if (posInWindow < halfWindow):
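
# The truncated fragment above pairs a FIFO buffer (windowValues) with a
# SortedList mirror (windowValuesSorted) so that order statistics over a
# fixed-size window stay cheap. A minimal, hypothetical sketch of that
# sliding-window pattern (the input values and window size are illustrative,
# not taken from the original script):
from sortedcontainers import SortedList

values = [5, 1, 4, 2, 8, 7, 3, 6]
windowSize = 4
windowValues = []                  # insertion order, used for eviction
windowValuesSorted = SortedList()  # sorted mirror, used for low/high lookups
for value in values:
    windowValues.append(value)
    windowValuesSorted.add(value)
    if len(windowValues) > windowSize:
        windowValuesSorted.remove(windowValues.pop(0))
    if len(windowValues) == windowSize:
        print(windowValues, "low:", windowValuesSorted[0],
              "high:", windowValuesSorted[-1])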
Example #29
0
class BeliefBase:
    """
    Belief base that implements epistemic entrenchment
    with finite partial entrenchment ranking.

    Each belief is assigned an order (a real number between 0 and 1)
    which determines its entrenchment, i.e. the level of commitment
    to maintain it when applying a change function (contraction,
    revision, etc).
    """
    def __init__(self):
        # Sort by decreasing order
        self.beliefs = SortedList(key=lambda b: neg(b.order))
        self._reorder_queue = []

    def _add_reorder_queue(self, belief, order):
        """
        Add command to queue for change of belief order.
        """
        self._reorder_queue.append((belief, order))

    def _run_reorder_queue(self):
        """
        Execute commands in change order queue.
        """
        for belief, order in self._reorder_queue:
            self.beliefs.remove(belief)
            # Ignore beliefs with order = 0
            if order > 0:
                belief.order = order
                self.beliefs.add(belief)
        self._reorder_queue = []

    def _discard_formula(self, formula):
        """
        Removes any beliefs with given formula, regardless of order.
        """
        for belief in self.beliefs:
            if belief.formula == formula:
                self._add_reorder_queue(belief, 0)
        self._run_reorder_queue()

    def iter_by_order(self):
        """
        Generator that groups beliefs in belief base by decreasing order.

        Yields:
            Tuples of type (order, list of beliefs with that order).

        Example:
            >>> bb = BeliefBase()
            >>> bb.add('a', 0.7)
            >>> bb.add('a|b', 0.7)
            >>> bb.add('b', 0.5)
            >>> bb.add('a&f', 0.1)
            >>> for it in bb.iter_by_order():
            ...     print(it)
            (0.7, [Belief(a, order=0.7), Belief(a | b, order=0.7)])
            (0.5, [Belief(b, order=0.5)])
            (0.1, [Belief(a & f, order=0.1)])
        """
        result = []
        last_order = None

        for belief in self.beliefs:
            # If it is the first belief we examine, add it and set last_order
            if last_order is None:
                result.append(belief)
                last_order = belief.order
                continue

            # If the order of this belief is "equal" to the previous, add it to the group
            if isclose(belief.order, last_order):
                result.append(belief)
            # Otherwise, yield the group and reset
            else:
                yield last_order, result
                result = [belief]
                last_order = belief.order

        # Yield the final group (guard against an empty belief base)
        if result:
            yield last_order, result

    def add(self, formula, order):
        """
        Adds belief to the belief base of given formula and order.

        The operation is not safe since the validity of the postulates
        is not guaranteed after insertion.
        """
        formula = to_cnf(formula)
        _validate_order(order)

        # Remove duplicates
        self._discard_formula(formula)

        # Ignore beliefs with order = 0
        if order > 0:
            belief = Belief(formula, order)
            self.beliefs.add(belief)

    def degree(self, formula):
        """
        Find maximum order j such that taking all beliefs in base
        with order >= j results in a belief set that entails formula.

        TODO: Implement with binary search.
        """
        formula = to_cnf(formula)
        if entails([], formula):
            # Tautologies have degree = 1
            return 1

        base = []
        for order, group in self.iter_by_order():
            # Get formulas from beliefs
            base += [b.formula for b in group]
            if entails(base, formula):
                return order
        return 0

    def expand(self, formula, order, add_on_finish=True):
        """
        Updates entrenchment ranking for belief base expansion.

        Params:
            - add_on_finish: If true, the formula will be added
            to the belief base after the ranking has been updated.
        """
        x = to_cnf(formula)
        _validate_order(order)
        logger.debug(f'Expanding with {x} and order {order}')

        if not entails([], ~x):
            # If x is a contradiction, ignore
            if entails([], x):
                # If x is a tautology, assign order = 1
                order = 1
            else:
                for belief in self.beliefs:
                    y = belief.formula
                    if belief.order > order:
                        # Don't change beliefs of higher order
                        continue

                    # Degree of implication x -> y
                    d = self.degree(x >> y)
                    if (entails([], Equivalent(x, y))  # if |- (x <-> y)
                            or belief.order <= order < d):
                        logger.debug(f'{belief} raised to order {order}')
                        self._add_reorder_queue(belief, order)
                    else:
                        self._add_reorder_queue(belief, d)
                self._run_reorder_queue()

            if add_on_finish:
                self.add(x, order)

        logger.debug(f'New belief base:\n{self}')

    def contract(self, formula, order):
        """
        Updates entrenchment ranking for belief base contraction.
        """
        x = to_cnf(formula)
        _validate_order(order)
        logger.debug(f'Contracting with {x} and order {order}')

        for belief in self.beliefs:
            y = belief.formula
            # Lower entrenchment if x and x|y have same degree
            if belief.order > order:
                dx = self.degree(x)
                xory = associate(Or, [x, y])
                dxory = self.degree(xory)
                if dx == dxory:
                    logger.debug(
                        f'degree({x}) = {dx}, degree({xory}) = {dxory}')
                    logger.debug(f'{belief} lowered to order {order}')
                    self._add_reorder_queue(belief, order)
        self._run_reorder_queue()

        logger.debug(f'New belief base:\n{self}')

    def revise(self, formula, order, add_on_finish=True):
        """
        Updates entrenchment ranking for belief base revision.

        Params:
            - add_on_finish: If true, the formula will be added
            to the belief base after the ranking has been updated.
        """
        x = to_cnf(formula)
        _validate_order(order)
        dx = self.degree(x)
        logger.debug(f'Revising with {x} and order {order} and degree {dx}')

        if not entails([], ~x):
            # If x is a contradiction, ignore
            if entails([], x):
                # If x is a tautology, assign order = 1
                order = 1
            elif order <= dx:
                self.contract(x, order)
            else:
                self.contract(~x, 0)
                self.expand(x, order, add_on_finish=False)

            if add_on_finish:
                self.add(x, order)

        logger.debug(f'New belief base:\n{self}')

    def clear(self):
        """
        Empty belief base.
        """
        self.beliefs.clear()

    def __len__(self):
        return len(self.beliefs)

    def __iter__(self):
        return iter(self.beliefs)

    def __reversed__(self):
        return reversed(self.beliefs)

    def __getitem__(self, index):
        return self.beliefs[index]

    def __repr__(self):
        if len(self.beliefs) == 0:
            return 'empty'
        return '\n'.join(str(x) for x in self.beliefs)
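
# A self-contained sketch (illustrative values, no Belief/sympy dependency) of
# the grouping pattern iter_by_order implements: walk a SortedList kept in
# decreasing order and batch neighbours whose keys compare equal via isclose.
from math import isclose
from operator import neg
from sortedcontainers import SortedList

orders = SortedList(key=neg)  # decreasing order, as in BeliefBase.__init__
orders.update([0.7, 0.7, 0.5, 0.1])

def iter_by_order(items):
    group, last = [], None
    for order in items:
        if last is None or isclose(order, last):
            group.append(order)
        else:
            yield last, group
            group = [order]
        last = order
    if group:  # guard the final yield against empty input
        yield last, group

print(list(iter_by_order(orders)))
# [(0.7, [0.7, 0.7]), (0.5, [0.5]), (0.1, [0.1])]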
Example #30
0
    class RankingHandler:
        """ Handle the maintenance around a list of postings.

        1. A user isn't going to go through all returned URLs, hence it makes sense to bound the number of rankings we
           have to work with. This is tunable in search.json.
        2. Prior to adding a new document, we ensure that the invariant above is maintained. If we reach our size limit,
           then we evict the document with the smallest ranking prior to adding a new one.
        3. This class also handles URL parsing. Profiling revealed that repeated calls to urllib.parse were fairly
           expensive, so we now only perform this once per new URL.
        4. To support the use of skip pointers when performing an intersection, we also provide a "next_largest"
           method, which we can perform in sub-linear time due to maintaining a sorted list of document IDs.

        """
        def __init__(self, **kwargs):
            if kwargs['ranker']['maximumSearchEntries'] > 0:
                self.maximum_size = kwargs['ranker']['maximumSearchEntries']
            else:
                self.maximum_size = math.inf

            self.doc_ids = SortedList()
            self.document_v = dict()
            self.rankings = dict()
            self.url_depths = dict()
            self.urls = dict()

        def reset(self):
            self.document_v.clear()
            self.rankings.clear()
            self.doc_ids.clear()
            self.url_depths.clear()
            self.urls.clear()

        def setup(self, doc_id, url):
            if url not in self.url_depths:
                self.url_depths[url] = urlparse(url).path.split('/')

            if doc_id in self.doc_ids:
                return
            elif len(self.rankings) >= self.maximum_size:
                smallest_ranking = None
                for i, (_doc_id, _v) in enumerate(self.rankings.items()):
                    if i == 0:
                        smallest_ranking = (
                            _doc_id,
                            _v,
                        )
                    elif _v < smallest_ranking[1]:  # keep the smallest ranking seen
                        smallest_ranking = (
                            _doc_id,
                            _v,
                        )

                if doc_id != smallest_ranking[0]:
                    del self.urls[smallest_ranking[0]]
                    del self.rankings[smallest_ranking[0]]
                    del self.document_v[smallest_ranking[0]]
                    self.doc_ids.remove(smallest_ranking[0])

            self.rankings[doc_id] = 0
            self.doc_ids.add(doc_id)
            self.urls[doc_id] = url

        def add(self, doc_id, v):
            self.rankings[doc_id] += v

        def record(self, doc_id, document_v, entry_k):
            enhanced_v = list((d, entry_k) for d in document_v)
            if doc_id in self.document_v:
                self.document_v[doc_id] = list(
                    heapq.merge(self.document_v[doc_id],
                                enhanced_v,
                                key=lambda a: a[0]))
            else:
                self.document_v[doc_id] = enhanced_v

        def remove(self, doc_id):
            if doc_id in self.doc_ids:
                del self.rankings[doc_id]
                del self.document_v[doc_id]
                del self.urls[doc_id]
                self.doc_ids.remove(doc_id)

        def contains(self, doc_id):
            return doc_id in self.doc_ids

        def next_largest(self, doc_id):
            # SortedList provides its own O(log n) bisect_right; the bisect
            # module would repeat O(log n) __getitem__ calls on each probe.
            largest_index = self.doc_ids.bisect_right(doc_id)
            if largest_index == len(self.doc_ids):
                return None
            else:
                return self.doc_ids[largest_index]

        def __call__(self, *args, **kwargs):
            return list((self.urls[d[0]], d[1]) for d in sorted(
                self.rankings.items(), key=lambda j: j[1], reverse=True))
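
# A small, self-contained sketch of the "next_largest" skip-pointer idea the
# class above documents: bisect_right on a SortedList of document IDs finds
# the next posting in O(log n). (The IDs below are illustrative.)
from sortedcontainers import SortedList

doc_ids = SortedList([2, 5, 9, 14, 20])

def next_largest(doc_id):
    idx = doc_ids.bisect_right(doc_id)
    return doc_ids[idx] if idx < len(doc_ids) else None

print(next_largest(5))   # 9
print(next_largest(6))   # 9
print(next_largest(20))  # None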
Example #31
0
class App:
    #!! y axis points down
    def __init__(self, width, height, scale=None,
                  c=0, #initial value of c in the equation z -> z^2+c
                  point_limit=200000, #maximum number of points drawn or -1 for unlimited, increase if Julia sets aren't drawn thoroughly enough even after the drawing stops
                  approach=30, #increase if you sometimes see points clearly out of the JS, decrease otherwise
                  center_x=0, center_y=0,
                  points_per_frame=1000, #number of points drawn at once, decrease if it lags, increase if Julia sets aren't drawn quickly enough
                  drag_point_size=1, point_size=0, #size of a point while dragging and otherwise, a single pixel will be drawn for sizes < 1
                  label_height=None, label_offset=None, label_font_size=None,
                  ):
                #parameter value of `None` means autodecide
        self.width = width
        self.height = height
        self.label_height = self.height//14 if label_height is None else label_height
        self.label_offset = self.width//30 if label_offset is None else label_offset
        self.label_font_size = min(self.label_height//2, self.width//22) if label_font_size is None else label_font_size
        self.screen = pygame.display.set_mode((self.width, self.height + self.label_height))
        self.label = self.screen.subsurface((0, 0, self.width, self.label_height))
        self.label_bg = (192,192,192)
        self.canvas = self.screen.subsurface((0, self.label_height, self.width, self.height))
        self.font = pygame.font.SysFont("consolas", self.label_font_size)
        self.scale = scale if scale is not None else min(self.width, self.height) // 4
        self.c = c
        self.num_points = 0
        self.to_draw = SortedList(key = lambda x:-x[1])
        self.point_limit = point_limit
        self.approach = approach
        self.center_x = center_x
        self.center_y = center_y
        self.points_per_frame = points_per_frame
        self.drag_point_size = int(drag_point_size)
        self.point_size = int(point_size)
        self.drag = None
        self.motion = None
        pygame.display.set_caption("Interactive Julia set generator")
        icon = pygame.image.load("icon.png")
        pygame.display.set_icon(icon)
        self.mandelbrot = pygame.image.load("mandelbrot.png") #image of mandelbrot set in the square {x+yi | max(abs(x),abs(y))<2}
        self.reset()
        
    def run(self):
        pygame.display.flip()
        while True:
            pygame.time.wait(20)
            if self.drag is not None:
                self.update_c(self.drag)
            self.drag = None
            self.motion = None
            for event in pygame.event.get():
                if event.type == pygame.QUIT:
                    pygame.quit()
                    sys.exit()
                elif event.type == pygame.MOUSEMOTION and event.buttons[0]:
                    self.drag = event
                elif event.type == pygame.MOUSEMOTION:
                    self.motion = event
                elif event.type == pygame.WINDOWLEAVE:
                    self.motion = self.drag = None
                    self.update_label()
                    break
                elif event.type == pygame.MOUSEBUTTONDOWN and event.button == 1:
                    self.update_c(event)
                elif event.type == pygame.KEYDOWN:
                    if event.key == pygame.K_s and event.mod & pygame.KMOD_CTRL:
                        self.export_image()
                        pygame.event.clear()
                        break
                    elif event.key in (pygame.K_UP, pygame.K_DOWN, pygame.K_LEFT, pygame.K_RIGHT):
                        x,y = self.c.real, self.c.imag
                        x,y = self.to_canvas_coords(x,y)
                        dx,dy = {pygame.K_UP: (0,-1), pygame.K_DOWN: (0,1),
                                 pygame.K_LEFT: (-1,0), pygame.K_RIGHT: (1,0)}[event.key]
                        x,y = self.to_plane_coords(x+dx, y+dy, absolute=False)
                        self.c = x + y*1j
                        self.reset()
                        break
            if self.motion is not None:
                self.mousemotion(self.motion)
        
            if self.num_points < self.point_limit or self.point_limit == -1:
                for i in range(self.points_per_frame):
                    point = self.to_draw.pop()
                    self.add_point(point[0])
                    for new_z in iteration(point[0], self.c):
                        new_derivative = point[1] + log(derivative(new_z))
                        self.to_draw.add((new_z, new_derivative))
            pygame.display.flip()
                
    def export_image(self):
        print("exporting image... ", end="")
        c = self.c
        x,y = c.real, c.imag
        default_name = "prisoner_set_{:.2f}{:+.2f}i".format(x,y)
        name = asksaveasfilename(filetypes=[("PNG image", "*.png"), ("All files", "*.*")],
                             defaultextension=".png",
                             initialfile=default_name)
        if name == "":
            print("canceled")
        else:
            img = create_image(c, iterations=100)
            img.save(name)
            print("finished")

    def to_canvas_coords(self, x,y):
        canvas_x = round(self.width / 2 + (x - self.center_x) * self.scale)
        canvas_y = round(self.height / 2 + (-y - self.center_y) * self.scale)
        return (canvas_x, canvas_y)

    def to_plane_coords(self, x,y, absolute=True):
        """absolute - whether the given coordinates are window coordinates (True)
        or canvas coordinates (False)"""
        dx = dy = 0
        if absolute:
            dx,dy = self.canvas.get_abs_offset()
        plane_x = (x - dx - self.width / 2) / self.scale + self.center_x
        plane_y = -((y - dy - self.height / 2) / self.scale + self.center_y)
        return (plane_x, plane_y)
    
    def add_point(self, z):
        x,y = z.real, z.imag
        x,y = self.to_canvas_coords(x,y)
        self.num_points += 1
        if self.drag is not None:
            pygame.draw.circle(self.canvas, (0,0,0), (x,y), self.drag_point_size)
        else:
            pygame.draw.circle(self.canvas, (0,0,0), (x,y), self.point_size)
        self.canvas.set_at((x,y), (0,0,0)) #a single pixel is drawn anyway

    def mousemotion(self, event):
        x,y = event.pos
        x,y = self.to_plane_coords(x,y)
        self.update_label()

    def update_c(self, event):
        x,y = event.pos
        x,y = self.to_plane_coords(x,y)
        self.c = x + y*1j
        self.reset()

    def reset(self):
        z = 50 + 1j
        self.num_points = 0
        for i in range(self.approach):
            z = choice(iteration(z, self.c))
        self.to_draw.clear()
        self.to_draw.add((z, 0))
        self.canvas.fill((255,255,255))
        self.canvas.blit(pygame.transform.smoothscale(self.mandelbrot, (4*self.scale, 4*self.scale)), self.to_canvas_coords(-2,2))
        self.update_label()
                    
    def update_label(self, left=None, right=None):
        self.label.fill(self.label_bg)
        if left is None:
            if pygame.mouse.get_focused():
                left = "pointer: {: .2f}{:+.2f}i".format(*self.to_plane_coords(*pygame.mouse.get_pos()))
            else:
                left = "click to set c"
        if right is None:
            right = "c = {: .2f}{:+.2f}i".format(self.c.real, self.c.imag)
        text_left = self.font.render(left,True,(0,0,0), self.label_bg)
        text_right = self.font.render(right,True,(0,0,0), self.label_bg)
        width = text_right.get_width()
        self.label.blit(text_left, (self.label_offset, (self.label_height - self.font.get_ascent())/2))
        self.label.blit(text_right, (self.width - width - self.label_offset, (self.label_height - self.font.get_ascent())/2))
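
# A headless, hypothetical sketch of the inverse-iteration scheme App drives:
# every point z has two preimages under w -> w**2 + c, namely +/- sqrt(z - c),
# and the SortedList ranks pending points by accumulated log-derivative,
# popping the smallest first as App.run does. The preimages() helper and the
# constants below are illustrative, not the App's iteration()/derivative().
import cmath
from math import log
from sortedcontainers import SortedList

def preimages(z, c):
    r = cmath.sqrt(z - c)
    return (r, -r)

c = -0.4 + 0.6j
to_draw = SortedList(key=lambda p: -p[1])  # pop() yields smallest log-derivative
to_draw.add((1 + 0j, 0.0))
points = []
for _ in range(1000):
    z, logd = to_draw.pop()
    points.append(z)
    for w in preimages(z, c):
        # |d/dw (w**2 + c)| = |2w|; accumulate its log to rank points
        to_draw.add((w, logd + log(2 * abs(w))))
print(len(points), "Julia-set points approximated")
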
from sortedcontainers import SortedList

# Group anagrams: the alphabetically sorted letters of each word form the key.
words = ["eat", "tea", "tan", "ate", "nat", "bat"]
dicio = {}
ordenado = SortedList()
for j in words:
    ordenado.update(j)       # insert the word's letters in sorted order
    key = "".join(ordenado)  # e.g. "eat" -> "aet"
    dicio.setdefault(key, []).append(j)
    ordenado.clear()
print(dicio)  # {'aet': ['eat', 'tea', 'ate'], 'ant': ['tan', 'nat'], 'abt': ['bat']}
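
# Design note: the standard library alone builds the same anagram key, without
# maintaining a SortedList per word:
grouped = {}
for j in words:
    grouped.setdefault("".join(sorted(j)), []).append(j)
print(grouped)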