Example #1
File: network.py  Project: samriang/pybsd
 def __init__(self, name, ips=None):
     #: :py:class:`str`: a name that identifies the interface.
     self.name = name
     #: :py:class:`sortedcontainers.SortedListWithKey` ([ :py:class:`ipaddress.IPv4Interface` ]): a sorted list containing all the
     #: IPv4 interfaces on this physical interface.
     self.ifsv4 = sortedcontainers.SortedListWithKey(key=lambda x: x.ip.compressed)
     #: :py:class:`sortedcontainers.SortedListWithKey` ([ :py:class:`ipaddress.IPv6Interface` ]): a sorted list containing all the
     #: IPv6 interfaces on this physical interface.
     self.ifsv6 = sortedcontainers.SortedListWithKey(key=lambda x: x.ip.compressed)
     #: :py:class:`ipaddress.IPv4Interface`: this interface's main IPv4 interface
     self.main_ifv4 = None
     #: :py:class:`ipaddress.IPv6Interface`: this interface's main IPv6 interface
     self.main_ifv6 = None
     ips = ips or []
     self.add_ips(ips)
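
The attributes above stay ordered because SortedListWithKey keeps its elements sorted by the supplied key, here the interface's compressed address string. A minimal sketch of that behaviour, using made-up addresses rather than anything from pybsd:

import ipaddress
import sortedcontainers

# Elements are kept sorted by the key function; the key is a string,
# so the resulting order is lexicographic.
ifsv4 = sortedcontainers.SortedListWithKey(key=lambda x: x.ip.compressed)
ifsv4.add(ipaddress.IPv4Interface('192.168.1.10/24'))
ifsv4.add(ipaddress.IPv4Interface('10.0.0.1/24'))
print([i.with_prefixlen for i in ifsv4])  # ['10.0.0.1/24', '192.168.1.10/24']
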
Example #2
    def __init__(self):
        super().__init__()
        self._store = {}
        self.filter = matchall
        # Should we show only marked flows?
        self.show_marked = False

        self.default_order = OrderRequestStart(self)
        self.orders = dict(
            time = self.default_order,
            method = OrderRequestMethod(self),
            url = OrderRequestURL(self),
            size = OrderKeySize(self),
        )
        self.order_key = self.default_order
        self.order_reversed = False
        self.focus_follow = False

        self._view = sortedcontainers.SortedListWithKey(key = self.order_key)

        # These signals broadcast events that affect the view. That is, an
        # update to a flow in the store but not in the view does not trigger a
        # signal. All signals are called after the view has been updated.
        self.sig_update = blinker.Signal()
        self.sig_add = blinker.Signal()
        self.sig_remove = blinker.Signal()
        # Signals that the view should be refreshed completely
        self.sig_refresh = blinker.Signal()

        self.focus = Focus(self)
        self.settings = Settings(self)
Example #3
    def __init__(self, max_episodes):

        self._max_episodes = max_episodes
        # need a sorted map...by reward

        self.episodes = sortedcontainers.SortedListWithKey(
            key=lambda x: x.total_reward)
Example #4
 def top_match(self, source_word):
     topten = sc.SortedListWithKey(key=sortkey)
     for word in [w for w in self._vdata if w != source_word]:
         topten.add(self.distance(source_word, word))
         if len(topten) > 10:
             topten.pop()
     return topten
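
Because the list stays sorted by its key, popping the last element whenever the length passes ten always discards the current worst candidate, so only the ten smallest keys survive. A self-contained sketch of the same bounded top-N pattern with plain numbers (the distance and sortkey helpers above are project-specific):

import random
import sortedcontainers

top = sortedcontainers.SortedListWithKey(key=lambda x: x)
for value in (random.random() for _ in range(1000)):
    top.add(value)
    if len(top) > 10:
        top.pop()  # pop() with no index removes the largest-keyed element
print(len(top))  # 10 -- the ten smallest values seen
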
Example #5
def bathroom(nb_s, nb_p):
    blocks = sortedcontainers.SortedListWithKey(
        [(0, nb_s - 1, nb_s)],
        # tuple-parameter lambdas are Python 2 only; index into the tuple instead
        key=lambda block: (-block[2], block[0]))
    min_d = -1
    max_d = -1
    for person in range(1, nb_p + 1):
        (min_d, max_d) = add_new_person(blocks)
    return min_d, max_d
Example #6
 def set_order(self, order_key: typing.Callable):
     """
         Sets the current view order.
     """
     self.order_key = order_key
     newview = sortedcontainers.SortedListWithKey(key=order_key)
     newview.update(self._view)
     self._view = newview
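
The key of an existing SortedListWithKey cannot be swapped in place, which is why the method builds a fresh list with the new key and bulk-loads the old contents through update(). A minimal sketch of the same rebuild, using integers instead of flows:

import sortedcontainers

view = sortedcontainers.SortedListWithKey([3, 1, 2], key=lambda x: x)
newview = sortedcontainers.SortedListWithKey(key=lambda x: -x)  # new ordering
newview.update(view)  # bulk-load the previous contents
view = newview
print(list(view))  # [3, 2, 1]
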
Example #7
    def __init__(self, secondsToLive: Union[int, float]):
        assert isinstance(secondsToLive, (int, float))
        assert secondsToLive > 0

        self.__entriesByDeathTime = sortedcontainers.SortedListWithKey(
            key=_getCEKey)
        self.__entriesByID = dict()
        self.__secondsToLive = secondsToLive

        self.__lock = threading.Lock()
Example #8
def load_collection(collection):
    """
    Given a collection directory, load every event in it and return
    them in a chronologically-sorted list.
    """
    def timestamp(event):
        return event.timestamp

    events = (Event.from_filesystem(collection, entry) for entry in os.listdir(collection))
    return sc.SortedListWithKey(events, key=timestamp)
Example #9
 def set_order(self, order: str) -> None:
     """
         Sets the current view order.
     """
     if order not in self.orders:
         raise exceptions.CommandError("Unknown flow order: %s" % order)
     order_key = self.orders[order]
     self.order_key = order_key
     newview = sortedcontainers.SortedListWithKey(key=order_key)
     newview.update(self._view)
     self._view = newview
Example #10
def __init_priority_list(vtree, p_func, m_func, do_topo_check=False):
    """
    Initializes the priority list of potential collapses to perform
    and populates the list with possible collapses on the original
    polyline.
    """
    ID = vtree[0]
    pl = __sc.SortedListWithKey(key=lambda x: x[0])  # empty sorted list
    for i in range(0, len(ID) - 3):  # loop through collapsible line segments
        __addToPriorityList(i, i + 1, i + 2, i + 3, vtree, pl, p_func, m_func,
                            do_topo_check)
    return pl  # return sorted priority list
Example #11
 def __init__(self, debugLogPrintFunction: callable = None):
     # list of all activities
     self.__activitiesByTime = sortedcontainers.SortedListWithKey(
         key=_getARKey)
     # list of activities that later can be identified again
     self.__activitiesByID = dict()
     self.__debugLogPrintFunction = debugLogPrintFunction
     self.__lock = threading.Lock()
     self.__evt = threading.Event()
     self.__thread = threading.Thread(target=self.__run, daemon=True)
     self.__bKeepRunning = True
     self.__bRunning = True
     self.__bIsStarted = False
Example #12
    def receive_a_query_need_those_cache_tiles(self, raster_uid, qi, cache_fps):
        """Receive message: A query started its optional collection phase and require those cache
        tiles.

        Priorities for those cache tiles should be updated if necessary.
        """
        msgs = []

        # Data structures shortcuts ********************************************
        ds0 = self._sorted_prod_tiles_per_cache_tile
        ds2 = self._cache_fp_per_query

        # Checks ***************************************************************
        assert qi not in ds2, 'received two collection phases for that query, should be 1 or 0'

        # Insert in `ds2` ******************************************************
        ds2[qi] = cache_fps

        # Insert in `ds0` and check for prio updates ***************************
        # The merger and writer are interested in this priority update
        cache_tile_updates = set()
        for cache_fp in cache_fps:
            cache_tile_key = (raster_uid, cache_fp)
            prod_tile_key = (qi, qi.dict_of_min_prod_idx_per_cache_fp[cache_fp])
            if cache_tile_key not in ds0:
                ds0[cache_tile_key] = (
                    sortedcontainers.SortedListWithKey(
                        [prod_tile_key],
                        key=lambda k: self.prio_of_prod_tile(*k)
                    )
                )
                cache_tile_updates.add(cache_tile_key)
            else:
                prev_prio = self.prio_of_prod_tile(*ds0[cache_tile_key][0])
                ds0[cache_tile_key].add(prod_tile_key)
                new_prio = self.prio_of_prod_tile(*ds0[cache_tile_key][0])
                if prev_prio != new_prio:
                    cache_tile_updates.add(cache_tile_key)

        # Emit messages ********************************************************
        if len(cache_tile_updates) != 0:
            self.db_version += 1
            new_prio = Priorities(self, self.db_version)
            msgs += [Msg(
                '/Pool*/WaitingRoom', 'global_priorities_update',
                new_prio, frozenset(), frozenset(cache_tile_updates)
            )]

        return msgs
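
Whether a cache tile's priority changed is detected by comparing the key of the head element (index 0) before and after the insertion, since the head always holds the best-ranked production tile. A small sketch of that head check with plain integer priorities (all names below are hypothetical):

import sortedcontainers

prio = {'tile_a': 5, 'tile_b': 2}  # lower value = higher priority
pending = sortedcontainers.SortedListWithKey(['tile_a'], key=lambda k: prio[k])
prev_prio = prio[pending[0]]
pending.add('tile_b')              # sorts ahead of 'tile_a'
if prio[pending[0]] != prev_prio:  # head changed => broadcast a priority update
    print('priority update')
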
Example #13
 def __init__(self, sorting_fn=None):
     # Requirements:
     # Add element, log n
     # Pop lowest value by occurrence time, log n
     #
     # Nice to have:
     # Evict expired processes, e.g. prune values < x, min(# pruned, log n)
     #
     # Current top ideas:
     # SortedListWithKey
     # http://www.grantjenks.com/docs/sortedcontainers/sortedlistwithkey.html
     # IMHO this will do for early iterating; we could build our own, but
     # honestly this is likely more performant. As a plus, it's pythonic AF.
     assert callable(sorting_fn), "Need callable for sorted process queue."
     self._queue = sortedcontainers.SortedListWithKey(key=sorting_fn)
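
A minimal sketch of the three operations the comment asks for, assuming each queued item carries its occurrence time as the first tuple element (the tuples below are made up):

import sortedcontainers

queue = sortedcontainers.SortedListWithKey(key=lambda item: item[0])
queue.add((15.0, 'b'))   # insert, O(log n)
queue.add((3.0, 'a'))
queue.add((42.0, 'c'))
earliest = queue.pop(0)  # pop the lowest occurrence time, O(log n)
del queue[:queue.bisect_key_left(20.0)]  # prune everything older than a cutoff
print(earliest, list(queue))  # (3.0, 'a') [(42.0, 'c')]
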
Example #14
 def __init__(self,
              vertices,
              faces,
              vertex_map_fn=None,
              remove_bad_faces=False):
     self._vertices = list(vertices)
     self._edge_lengths = {}
     self._edge_faces = defaultdict(dict)
     self._edges = sortedcontainers.SortedListWithKey(
         key=lambda x: self._edge_lengths[x])
     self._faces = set()
     self._vertex_map_fn = vertex_map_fn
     if remove_bad_faces:
         faces = (f for f in faces if has_unique_vertices(f))
     for face in faces:
         self.add_face(tuple(face))
Example #15
 def __init__(self,
              experience_buffer,
              metric,
              bins,
              alpha,
              lowest_value_lowest_index=True):
     order_multiplier = 1 if lowest_value_lowest_index else -1
     self.experience_buffer = experience_buffer
     self.bins = bins
     self.bin_indices = [0, 0]
     self.alpha = alpha
     start_list = []
     self.ordered_indices = sortedcontainers.SortedListWithKey(
         start_list,
         key=lambda x: float(order_multiplier * self.experience_buffer.
                             _buffer['experience_meta_data'][metric][x][0]))
     experience_buffer.add_experience_meta_data_update_listener(
         metric, self.update)
Example #16
def tfidf_pseudo_relevance_feedback(query_dict,
                                    doc_dict,
                                    k=2,
                                    num_docs=15,
                                    num_words=30):
    # find the average doc length
    average_doc_length = average_length(doc_dict)

    # initialise dictionary for results
    score_dict = {}

    for query_id in query_dict:
        # get the text
        query_text = query_dict[query_id]
        # initialise sorted list which sorts by the score
        top_ranking = sortedcontainers.SortedListWithKey(
            # tuple-parameter lambdas are Python 2 only; items are (doc_id, score) pairs
            key=lambda item: (item[1], item[0]))

        for doc_id, tfidf_score in calculate_tfidf(query_text, doc_dict,
                                                   average_doc_length, k):
            top_ranking = find_ranking(top_ranking, doc_id, tfidf_score,
                                       num_docs)

        # go through top ranking docs gather all words
        # TODO: improve
        top_doc_word_list = []
        for doc_id, _ in top_ranking:
            doc_text = doc_dict[doc_id]
            top_doc_word_list += doc_text

        # find the most frequent words
        freq_dist = nltk.FreqDist(word for word in top_doc_word_list)
        best_words = [word for word, _ in freq_dist.most_common(num_words)]

        # add to the query
        new_query = query_text + best_words

        # recalculate tfidf score and add to score dictionary
        for doc_id, tfidf_score in calculate_tfidf(new_query, doc_dict,
                                                   average_doc_length, k):
            score_dict[query_id, doc_id] = tfidf_score

    return score_dict
Example #17
    def newsfeed_ships(self):
        """
        Function that generates ids of ships that was posted to friends' docks and joined
        seas chronologically
        """
        posts = []
        # posts is a list of (post_id, post_no_in_sailed_ships_list) tuples
        for friend in self.friends:
            friend_posts = database[friend].sailed_ships
            no_of_posts = len(friend_posts)
            if no_of_posts != 0:
                newest_post_id = friend_posts[no_of_posts - 1][0]
                posts.append((newest_post_id, no_of_posts - 1))
        for followee in self.followees:
            followee_posts = database[followee].sailed_ships
            no_of_posts = len(followee_posts)
            if no_of_posts != 0:
                newest_post_id = followee_posts[no_of_posts - 1][0]
                posts.append((newest_post_id, no_of_posts - 1))
        for group in self.seas:
            group_posts = database[group].sailed_ships
            no_of_posts = len(group_posts)
            if no_of_posts != 0:
                newest_post_id = group_posts[no_of_posts - 1][0]
                posts.append((newest_post_id, no_of_posts - 1))
        sorted_posts = sortedcontainers.SortedListWithKey(
            posts, key=lambda tup: database[tup[0]].creation_date)

        while True:
            if len(sorted_posts) != 0:
                post_id, post_no = sorted_posts[len(sorted_posts) - 1]
            else:
                break
            yield post_id
            sorted_posts.remove((post_id, post_no))
            if post_no != 0:
                post_creator_id = database[post_id].where_is_it_created_id
                posts = database[post_creator_id].sailed_ships
                # print(post_no-1, posts)
                new_post_id = posts[post_no - 1][0]
                sorted_posts.add((new_post_id, post_no - 1))
Example #18
    def __init__(self):
        super().__init__()
        self._store = collections.OrderedDict()
        self.filter = matchall
        # Should we show only marked flows?
        self.show_marked = False

        self.default_order = OrderRequestStart(self)
        self.orders = dict(
            time=OrderRequestStart(self),
            method=OrderRequestMethod(self),
            url=OrderRequestURL(self),
            size=OrderKeySize(self),
        )
        self.order_key = self.default_order
        self.order_reversed = False
        self.focus_follow = False

        self._view = sortedcontainers.SortedListWithKey(key=self.order_key)

        # The sig_view* signals broadcast events that affect the view. That is,
        # an update to a flow in the store but not in the view does not trigger
        # a signal. All signals are called after the view has been updated.
        self.sig_view_update = blinker.Signal()
        self.sig_view_add = blinker.Signal()
        self.sig_view_remove = blinker.Signal()
        # Signals that the view should be refreshed completely
        self.sig_view_refresh = blinker.Signal()

        # The sig_store* signals broadcast events that affect the underlying
        # store. If a flow is removed from just the view, sig_view_remove is
        # triggered. If it is removed from the store while it is also in the
        # view, both sig_store_remove and sig_view_remove are triggered.
        self.sig_store_remove = blinker.Signal()
        # Signals that the store should be refreshed completely
        self.sig_store_refresh = blinker.Signal()

        self.focus = Focus(self)
        self.settings = Settings(self)
Example #19
 def from_table(cls, table):
     obj = cls()
     obj.table = sortedcontainers.SortedListWithKey(
         table, key=lambda val: -val.fitness)
     return obj
Example #20
 def __init__(self, items=None, maxlen=None):
     self.maxlen = maxlen
     self._dict = {}
     self._list = sortedcontainers.SortedListWithKey(key=operator.itemgetter(1))
     if items:
         self.update(items)
Example #21
File: c08.py  Project: sam-willis/MCC
import binascii
import sortedcontainers
from math import ceil

ciphertext_by_score = sortedcontainers.SortedListWithKey(
    key=lambda val: -val[0])

with open("08.txt", 'r') as file:
    texts = [binascii.a2b_hex(line.strip()) for line in file.readlines()]
    for text in texts:
        unique_blocks = set()
        for index in range(0, len(text), 16):
            unique_blocks.add(text[index:index + 16])
        ciphertext_by_score.add((len(unique_blocks), text), )
    print(ciphertext_by_score.pop()[1])
Example #22
def make_group(*events):
    return sortedcontainers.SortedListWithKey(events, key=group_key)
Example #23
 def clear(self):
     self.values = sortedcontainers.SortedListWithKey(
         key=functools.cmp_to_key(self._cmp))
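
functools.cmp_to_key turns a two-argument comparator into a key object, so a legacy cmp-style method can still drive the ordering. A tiny sketch of the same idea with a standalone comparator:

import functools
import sortedcontainers

def compare(a, b):
    return (a > b) - (a < b)  # classic cmp: negative, zero or positive

values = sortedcontainers.SortedListWithKey(key=functools.cmp_to_key(compare))
values.update([3, 1, 2])
print(list(values))  # [1, 2, 3]
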
Example #24
 def __init__(self):
     self.logger = logging.getLogger("Mercury.Scheduler")
     self.timer_thread = None
     self.evhandler = eventhandler.EventHandler()
     self.schedule = sortedcontainers.SortedListWithKey(
         key=lambda x: x['when'])
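
Keying the schedule on the 'when' timestamp means the next event due is always at index 0, so draining due events is just a loop of pop(0). A small usage sketch under that assumption:

import time
import sortedcontainers

schedule = sortedcontainers.SortedListWithKey(key=lambda x: x['when'])
now = time.time()
schedule.add({'when': now + 5, 'event': 'later'})
schedule.add({'when': now - 1, 'event': 'overdue'})

while schedule and schedule[0]['when'] <= time.time():
    print(schedule.pop(0)['event'])  # only 'overdue' fires now
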
Example #25
import requests
import random
import json
import sys
import sortedcontainers
from datetime import datetime
import time

#This file only has the actual data generation functions so we can play with them.
GLOBAL_COUNTER = 1432924245  #for testing what rickshaw does w/limited space
ALERT_COUNTER = 15926  # for removing/adding alerts on the fly
STEP_SIZE = 10  #how far we go each "step" in our graph update
##Boundary values for random data:
MIN_INT = 5
MAX_INT = 40
all_alerts = sortedcontainers.SortedListWithKey(key=lambda x: x["score"])
#set up stuff we need for random generation
distribution = {
    200: 80,
    304: 1,
    404: 4,
    206: 1,
    301: 1,
    400: 1,
    403: 1,
    401: 1,
    405: 1,
    411: 1,
    500: 3,
    502: 1,
    503: 1,
Example #26
 def __init__(self):
     self.table = sortedcontainers.SortedListWithKey(
         key=lambda val: -val.fitness)
Example #27
    def run(self):
        """
        Main run function of the order book, executed in a thread using start(). Starts the order book and processes
        all communications.
        """

        # Continue as long as there is no stop signal
        while self.running:

            # Initial variables, specific to each connection
            connection_tries = 0
            connection_delay = 0

            # Initialise the data structure
            for currency_pair in self.markets:
                self.data_store[currency_pair['base_currency'], currency_pair['quote_currency']] = {
                    'order_book_ask': sortedcontainers.SortedListWithKey(key=lambda val: val[0]),
                    'order_book_bid': sortedcontainers.SortedListWithKey(key=lambda val: -val[0]),
                    'last_sequence': None,
                    'status': 'inactive',
                }

            # Connect to the order book. Continue trying in case of issues or a temporary downtime
            while self.socket_handle is None:

                # Delay connecting if required, to prevent flooding the remote server with connection tries
                time.sleep(connection_delay)

                # Call the connect function, implemented by the child class
                try:
                    self.socket_handle = self.connect()
                except OrderBookError as e:
                    logger.warning("Could not connect with the websocket API: %s" % e)

                    connection_tries += 1

                    # Delay the next connection if connecting failed more than 3 times. 1 second for the 4th try,
                    # up until 5 seconds for the 8th try and over
                    if connection_tries > 3:
                        connection_delay = min(connection_tries - 3, 5)

                    # Give up after 2000 failed tries to connect
                    if connection_tries > 2000:
                        raise OrderBookError("Failed to connect with the websocket after 2000 tries")

            logger.info("Order book connection established")

            # Subscribe to all specified markets
            for pair, _ in self.data_store.items():

                # Send subscription message
                self.subscribe(pair[0], pair[1])

                # Update status of market
                self.data_store[pair]['status'] = 'initialising'

            # Run in a loop to process messages until we want to stop, encounter an error or timeout
            while self.running and not self.restart:

                # Call the update method of the child. Each call returns a list with 0 or more update messages
                try:
                    updates = self.receive()
                except OrderBookError as e:
                    logger.warning("Error while receiving data: %s" % e)
                    self.restart = True

                else:
                    # Process all updates
                    if len(updates) > 0:
                        for item in updates[:-1]:
                            self.update(item)
                        self.update(updates[-1], True)

            # Initialise a restart if requested
            if self.restart and self.running:
                logger.info("Order book restart initiated")

                # Try to cleanly disconnect
                self.disconnect()

                # Reset data structures
                self.data_store = {}
                self.socket_handle = None
                self.restart = False

                # Instruct child class to reset its exchange specific data structures, if implemented
                self.reset_data_structures()

        # Disconnect when shutting down
        self.disconnect()
Example #28
File: battle.py  Project: Erotemic/pypogo
 def __init__(self):
     import itertools as it
     self._items = sortedcontainers.SortedListWithKey(key=lambda x: x[0])
     # self._items = PriorityQueue()
     self._nextid = it.count(0)