import heapq

from sortedcontainers import SortedSet


class Solution(object):
    def busiestServers(self, k, arrival, load):
        """
        :type k: int
        :type arrival: List[int]
        :type load: List[int]
        :rtype: List[int]
        """

        pq = []
        n = len(arrival)
        serves = [0] * k
        finish = SortedSet()  # ids of currently free servers
        # The first min(k, n) requests can always take their preferred
        # server, since server i % k == i has not served anything yet.
        for i in range(k):
            if i < n:
                heapq.heappush(pq, (arrival[i] + load[i], i))
                serves[i] += 1
        for i in range(k, n):
            arr = arrival[i]
            dur = load[i]
            # Release every server that has finished by this arrival time.
            while pq and pq[0][0] <= arr:
                _, idx = heapq.heappop(pq)
                finish.add(idx)
            if not finish:
                continue  # all servers busy: this request is dropped
            idx = i % k  # preferred server for request i
            if idx not in finish:
                # Otherwise take the next free server clockwise, wrapping around.
                j = finish.bisect(idx)
                if j >= len(finish):
                    j = 0
                idx = finish[j]
            heapq.heappush(pq, (arr + dur, idx))
            serves[idx] += 1
            finish.remove(idx)

        mx = max(serves)
        return [i for i, s in enumerate(serves) if s == mx]
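A quick check against the LeetCode 1606 sample (using the imports and class wrapper above):

print(Solution().busiestServers(3, [1, 2, 3, 4, 5], [5, 2, 3, 3, 3]))  # [1]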
Example #2
class _CoinDataSet(object):
    def __init__(self, init_from_file=True):
        self._market = coinmarketcap.Market()
        self._data = SortedSet()
        if init_from_file:
            for filename in os.listdir(STORAGE_DIR):
                with open(os.path.join(STORAGE_DIR, filename), 'r') as fp:
                    datapoint_list = json.load(
                        fp, object_hook=_CDPEncoder.decode_hook)
                    self._data.update(datapoint_list)

    def _DownloadNewDataPoint(self):
        cmc_dict = self._market.ticker(limit=0)
        data_to_store = {
            coin["symbol"]: coin["price_usd"]
            for coin in cmc_dict
        }
        self._data.add(_CoinDataPoint(timestamp(), data_to_store))
        self._DumpCurrentDayToFile()

    def _DumpAllToFile(self, filestr):
        data_to_dump = list(self._data)

        with open(filestr, 'w') as fp:
            json.dump(data_to_dump, fp, cls=_CDPEncoder)

    def _DumpCurrentDayToFile(self):
        # Midnight in unix time (system time zone)
        midnight = datetime.combine(date.today(), time.min)
        midnight_unix = int(midnight.timestamp())

        # All data since midnight.
        data_to_dump = list(self._data.irange(_CoinDataPoint(midnight_unix)))

        filestr = os.path.join(STORAGE_DIR,
                               midnight.strftime('%Y-%m-%d.coinjson'))
        with open(filestr, 'w') as fp:
            json.dump(data_to_dump, fp, cls=_CDPEncoder)

    def GetValue(self, symbol, time=None):
        try:
            if not time:
                return float(self._data[-1].coin_data[symbol.upper()])
            else:
                bisect_point = self._data.bisect(_CoinDataPoint(time))
                if bisect_point == 0:
                    return None
                return float(self._data[bisect_point -
                                        1].coin_data[symbol.upper()])
        except (IndexError, KeyError):
            return None

    def GetDayChange(self, symbol):
        currentVal = self.GetValue(symbol)
        yesterday_time = datetime.today() - timedelta(days=1)
        oldVal = self.GetValue(symbol, yesterday_time.timestamp())
        if currentVal is None or oldVal is None:
            return None
        return 100 * ((currentVal - oldVal) / oldVal)
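This example leans on a _CoinDataPoint class that is not part of the excerpt; for the bisect() and irange() calls above to work, such points must sort by timestamp. A minimal hypothetical sketch of what it might look like:

import functools

@functools.total_ordering
class _CoinDataPoint(object):
    # Hypothetical sketch: order data points purely by timestamp so that
    # SortedSet.bisect()/irange() can be used as time queries.
    def __init__(self, timestamp, coin_data=None):
        self.timestamp = timestamp
        self.coin_data = coin_data or {}

    def __eq__(self, other):
        return self.timestamp == other.timestamp

    def __lt__(self, other):
        return self.timestamp < other.timestamp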
Example #3
from sortedcontainers import SortedSet

class MyCalendar:
    def __init__(self):
        self.intervals = SortedSet(key=lambda x: x[0])

    def book(self, start: int, end: int) -> bool:
        if start > end:
            return False
        i = self.intervals.bisect((start, end))
        sol = True
        if i != 0:
            # The previous event must end at or before the new start.
            sol &= self.intervals[i - 1][1] <= start
        if sol and i != len(self.intervals):
            # The next event must start at or after the new end.
            sol &= self.intervals[i][0] >= end
        if sol:
            self.intervals.add((start, end))
        return sol
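Booked intervals are half-open, as in LeetCode 729:

cal = MyCalendar()
print(cal.book(10, 20))  # True
print(cal.book(15, 25))  # False: overlaps [10, 20)
print(cal.book(20, 30))  # True: may start exactly where the last one ends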
Example #4
class _PortfolioSet(object):
    def __init__(self, user_id):
        self.user_id = user_id
        self._file = os.path.join(STORAGE_DIR, str(user_id))
        self._data = SortedSet()
        if os.path.exists(self._file):
            with open(self._file, 'r') as fp:
                datapoint_list = json.load(
                    fp, object_hook=_PortfolioEncoder.decode_hook)
                self._data.update(datapoint_list)

    def GetPortfolio(self, timestamp=None):
        rval = None
        try:
            if not timestamp:
                rval = self._data[-1]
            else:
                bisect_point = self._data.bisect(
                    _PortfolioAtTimestamp(0, timestamp))
                if bisect_point > 0:
                    rval = self._data[bisect_point - 1]
        except (IndexError, KeyError):
            pass

        # No portfolio at the specified time, return empty portfolio.
        if not rval:
            return _PortfolioAtTimestamp(self.user_id, timestamp
                                         or time.time())
        # We need to use copy.deepcopy here so that the stored (old) data is
        # kept intact. The timestamp is set as requested, so the copied
        # portfolio can simply be saved.
        rval = copy.deepcopy(rval)
        rval.timestamp = timestamp or time.time()
        return rval

    def AddPortfolio(self, portfolio):
        self._data.add(portfolio)

    def Save(self):
        data_to_dump = list(self._data)
        with open(self._file, 'w') as fp:
            json.dump(data_to_dump, fp, cls=_PortfolioEncoder)
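GetPortfolio above, like GetValue in Example #2, uses the bisect(x) - 1 pattern to fetch the most recent entry at or before a timestamp. The same idea with plain integers:

from sortedcontainers import SortedSet

s = SortedSet([10, 20, 30])
i = s.bisect(25)                    # 2: count of elements <= 25
latest = s[i - 1] if i else None    # 20, the last entry at or before 25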
Example #5
def avoidFlood(self, rains):
    n = len(rains)
    ans = [-1] * n
    zeros = SortedSet()  # indices of dry days not yet assigned a lake
    last_fill = {}       # lake -> most recent day it was filled
    for i in range(n):
        if rains[i] == 0:
            zeros.add(i)
            continue
        if rains[i] in last_fill:
            # Find the first unassigned dry day strictly after the last
            # day this lake was filled.
            idx = zeros.bisect(last_fill[rains[i]])
            if idx == len(zeros):
                return []  # no such dry day: a flood is unavoidable
            ans[zeros.pop(idx)] = rains[i]
        last_fill[rains[i]] = i
    # Any dry day left unassigned may dry an arbitrary lake; drying an
    # already-empty lake is a no-op, so lake 1 is always a safe choice.
    for i in zeros:
        ans[i] = 1
    return ans
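A quick check against the LeetCode 1488 samples (the unused self is passed as None):

print(avoidFlood(None, [1, 2, 0, 0, 2, 1]))  # [-1, -1, 2, 1, -1, -1]
print(avoidFlood(None, [1, 2, 0, 1, 2]))     # []  (flood is unavoidable)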
Example #6
def test_bisect():
    # Older sortedcontainers API: SortedSet still took a load= argument, and
    # these asserts expect bisect() to behave like bisect_left(). Compare
    # Example #8, which targets the current API.
    temp = SortedSet(range(100), load=7)
    assert all(temp.bisect_left(val) == val for val in range(100))
    assert all(temp.bisect(val) == val for val in range(100))
    assert all(temp.bisect_right(val) == (val + 1) for val in range(100))
Example #7
class HexaryTrieFog:
    """
    Keeps track of which parts of a trie have been verified to exist.

    Named after "fog of war" popular in video games like... Red Alert? IDK, I'm old.

    Object is immutable. Any changes, like marking a key prefix as complete, will
    return a new HexaryTrieFog object.
    """
    _unexplored_prefixes: GenericSortedSet[Nibbles]

    # INVARIANT: No unexplored prefix may start with another unexplored prefix
    #   For example, _unexplored_prefixes may not be {(1, 2), (1, 2, 3)}.

    def __init__(self) -> None:
        # Always start without knowing anything about a trie. The only unexplored
        #   prefix is the root prefix: (), which means the whole trie is unexplored.
        self._unexplored_prefixes = SortedSet({()})

    def __repr__(self) -> str:
        return f"HexaryTrieFog<{self._unexplored_prefixes!r}>"

    @property
    def is_complete(self) -> bool:
        return len(self._unexplored_prefixes) == 0

    def explore(
            self,
            old_prefix_input: NibblesInput,
            foggy_sub_segments: Sequence[NibblesInput]) -> 'HexaryTrieFog':
        """
        The fog lifts from the old prefix. This call returns a HexaryTrieFog that narrows
        down the unexplored key prefixes, from the old prefix to the indicated children.

        For example, if only the key prefix 0x12 is unexplored, then calling
        explore((1, 2), ((3,), (0xe, 0xf))) would mark large swaths of 0x12 explored, leaving only
        two prefixes as unknown: 0x123 and 0x12ef. To continue exploring those prefixes, navigate
        to them using traverse() or traverse_from().

        The foggy_sub_segments may be empty, which means the old prefix has been fully explored.
        """
        old_prefix = Nibbles(old_prefix_input)
        sub_segments = [Nibbles(segment) for segment in foggy_sub_segments]
        new_fog_prefixes = self._unexplored_prefixes.copy()

        try:
            new_fog_prefixes.remove(old_prefix)
        except KeyError:
            raise ValidationError(f"Old parent {old_prefix} not found in {new_fog_prefixes!r}")

        if len(set(sub_segments)) != len(sub_segments):
            raise ValidationError(
                f"Got duplicate sub_segments in {sub_segments} to HexaryTrieFog.explore()"
            )

        # Further validation that no segment is a prefix of another
        all_lengths = set(len(segment) for segment in sub_segments)
        if len(all_lengths) > 1:
            # The known use case of exploring nodes one at a time will never reach this
            #   validation check, which might be slow. Leaf nodes have no sub segments,
            #   extension nodes have exactly one, and branch nodes have all sub_segments
            #   of length 1. If a new use case hits this verification, and speed becomes an issue,
            #   see https://github.com/ethereum/py-trie/issues/107
            for segment in sub_segments:
                shorter_lengths = [length for length in all_lengths if length < len(segment)]
                for check_length in shorter_lengths:
                    trimmed_segment = segment[:check_length]
                    if trimmed_segment in sub_segments:
                        raise ValidationError(
                            f"Cannot add {segment} which is a child of segment {trimmed_segment}"
                        )

        new_fog_prefixes.update([old_prefix + segment for segment in sub_segments])
        return self._new_trie_fog(new_fog_prefixes)

    def mark_all_complete(self, prefix_inputs: Sequence[NibblesInput]) -> 'HexaryTrieFog':
        """
        These might be leaves, or prefixes with 0 unknown keys within the range.

        This is equivalent to the following, but with better performance:

            result_fog = old_fog
            for complete_prefix in prefixes:
                result_fog = result_fog.explore(complete_prefix, ())
        """
        new_unexplored_prefixes = self._unexplored_prefixes.copy()
        for prefix in map(Nibbles, prefix_inputs):
            if prefix not in new_unexplored_prefixes:
                raise ValidationError(
                    f"When marking {prefix} complete, could not find in {new_unexplored_prefixes!r}"
                )

            new_unexplored_prefixes.remove(prefix)
        return self._new_trie_fog(new_unexplored_prefixes)

    def nearest_unknown(self, key_input: NibblesInput = ()) -> Nibbles:
        """
        Find the foggy prefix that is nearest to the supplied key.

        If prefixes are exactly the same distance to the left and right,
        then return the prefix on the right.

        :raises PerfectVisibility: if there are no foggy prefixes remaining
        """
        key = Nibbles(key_input)

        index = self._unexplored_prefixes.bisect(key)

        if index == 0:
            # If the sorted set is empty, bisect will return 0.
            # But it might also return 0 if the search value is lower than the lowest
            #   existing prefix.
            try:
                return self._unexplored_prefixes[0]
            except IndexError as exc:
                raise PerfectVisibility("There are no more unexplored prefixes") from exc
        elif index == len(self._unexplored_prefixes):
            return self._unexplored_prefixes[-1]
        else:
            nearest_left = self._unexplored_prefixes[index - 1]
            nearest_right = self._unexplored_prefixes[index]

            # is the left or right unknown prefix closer?
            left_distance = self._prefix_distance(nearest_left, key)
            right_distance = self._prefix_distance(key, nearest_right)
            if left_distance < right_distance:
                return nearest_left
            else:
                return nearest_right

    def nearest_right(self, key_input: NibblesInput) -> Nibbles:
        """
        Find the foggy prefix that is nearest on the right to the supplied key.

        :raises PerfectVisibility: if there are no foggy prefixes to the right
        """
        key = Nibbles(key_input)

        index = self._unexplored_prefixes.bisect(key)

        if index == 0:
            # If the sorted set is empty, bisect will return 0.
            # But it might also return 0 if the search value is lower than the lowest
            #   existing prefix.
            try:
                return self._unexplored_prefixes[0]
            except IndexError as exc:
                raise PerfectVisibility("There are no more unexplored prefixes") from exc
        else:
            nearest_left = self._unexplored_prefixes[index - 1]

            # always return nearest right, unless prefix of key is unexplored
            if key_starts_with(key, nearest_left):
                return nearest_left
            else:
                try:
                    # This can raise an IndexError if index == len(unexplored prefixes)
                    return self._unexplored_prefixes[index]
                except IndexError as exc:
                    raise FullDirectionalVisibility(
                        f"There are no unexplored prefixes to the right of {key}"
                    ) from exc

    @staticmethod
    @to_tuple
    def _prefix_distance(low_key: Nibbles, high_key: Nibbles) -> Iterable[int]:
        """
        How far are the two keys from each other, as a sequence of differences.
        The first non-zero distance must be positive, but the remaining distances may
        be negative. Distances are designed to be simply compared, like distance1 < distance2.

        The high_key must be higher than the low key, or the output distances are not
        guaranteed to be accurate.
        """
        for low_nibble, high_nibble in zip_longest(low_key, high_key, fillvalue=None):
            if low_nibble is None:
                final_low_nibble = 15
            else:
                final_low_nibble = low_nibble

            if high_nibble is None:
                final_high_nibble = 0
            else:
                final_high_nibble = high_nibble

            # Note: this might return a negative value. It's fine, because only the
            #   relative distance matters. For example (1, 2) and (2, 1) produce a
            #   distance of (1, -1). If the other reference point is (3, 1), making
            #   the distance to the middle (1, 0), then the "correct" thing happened.
            #   The (1, 2) key is a tiny bit closer to the (2, 1) key, and a tuple
            #   comparison of the distance will show it as a smaller distance.
            yield final_high_nibble - final_low_nibble

    @classmethod
    def _new_trie_fog(cls, unexplored_prefixes: SortedSet) -> 'HexaryTrieFog':
        """
        Convert a set of unexplored prefixes to a proper HexaryTrieFog object.
        """
        copy = cls()
        copy._unexplored_prefixes = unexplored_prefixes
        return copy

    def serialize(self) -> bytes:
        # encode nibbles to a bytes value, to compress this down a bit
        prefixes = [
            encode_nibbles(nibbles)
            for nibbles in self._unexplored_prefixes
        ]
        return f"HexaryTrieFog:{prefixes!r}".encode()

    @classmethod
    def deserialize(cls, encoded: bytes) -> 'HexaryTrieFog':
        serial_prefix = b'HexaryTrieFog:'
        if not encoded.startswith(serial_prefix):
            raise ValueError(f"Cannot deserialize this into HexaryTrieFog object: {encoded!r}")
        else:
            encoded_list = encoded[len(serial_prefix):]
            prefix_list = ast.literal_eval(encoded_list.decode())
            deserialized_prefixes = SortedSet(
                # decode nibbles from compressed bytes value, and validate each value in range(16)
                Nibbles(decode_nibbles(prefix))
                for prefix in prefix_list
            )
            return cls._new_trie_fog(deserialized_prefixes)

    def __eq__(self, other: Any) -> bool:
        if not isinstance(other, HexaryTrieFog):
            return False
        else:
            return self._unexplored_prefixes == other._unexplored_prefixes
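A sketch of the exploration flow the docstrings describe, assuming the surrounding py-trie helpers (Nibbles, the exception types, etc.); prefixes are written as plain tuples of nibbles:

fog = HexaryTrieFog()
assert not fog.is_complete                   # everything starts unexplored
fog = fog.explore((), ((3,), (0xe, 0xf)))    # root seen; two children still foggy
print(fog.nearest_unknown((0,)))             # (3,) -- the closest foggy prefix
fog = fog.mark_all_complete([(3,), (0xe, 0xf)])
assert fog.is_complete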
Example #8
def test_bisect():
    # Current sortedcontainers API: _reset() replaces the old load= argument,
    # and bisect() is an alias for bisect_right().
    temp = SortedSet(range(100))
    temp._reset(7)
    assert all(temp.bisect_left(val) == val for val in range(100))
    assert all(temp.bisect(val) == (val + 1) for val in range(100))
    assert all(temp.bisect_right(val) == (val + 1) for val in range(100))
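The difference between the two test versions comes down to what bisect() aliases. Against a recent sortedcontainers release:

from sortedcontainers import SortedSet

s = SortedSet(range(100))
assert s.bisect_left(50) == 50
assert s.bisect(50) == s.bisect_right(50) == 51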
Example #9
from sortedcontainers import SortedSet

def approximate_events(W, E, delta):
    """
    Approximation method reducing the number of distinct event times while preserving connectivity properties
    of the original dataset.

    :param W: dict mapping each node to a flat list of presence times
        [t0, t1, t0, t1, ...], read as intervals (t0, t1).
    :param E: dict mapping each link to a flat list of presence times,
        in the same interval format.
    :param delta: minimum spacing between two kept event times; every
        input interval is assumed to last at least delta.
    :return: (new_W, new_E) with interval bounds snapped to the
        discretized event times.
    """
    # Snapping to existing event times (rather than to multiples of delta)
    # may seem strange, but it avoids float imprecision.
    event_times = sorted(
        set([t for np in W.values()
             for t in np] + [t for lp in E.values() for t in lp]))
    t_old = event_times[0]
    discretized_event_times = SortedSet()
    discretized_event_times.add(t_old)
    for t in event_times:
        if t - t_old >= delta:
            discretized_event_times.add(t)
            t_old = t

    new_W = {}
    for n, np in W.items():
        new_W[n] = []
        for t0, t1 in zip(np[::2], np[1::2]):
            assert t1 - t0 >= delta
            if t0 not in discretized_event_times:
                # Snap t0 forward to the next discretized event time.
                t0 = discretized_event_times[discretized_event_times.bisect(t0)]
            if t1 not in discretized_event_times:
                # Snap t1 back to the previous discretized event time.
                t1 = discretized_event_times[discretized_event_times.bisect(t1) - 1]
            new_W[n] += [t0, t1]

    new_E = {}
    for l, lp in E.items():
        new_E[l] = []
        for t0, t1 in zip(lp[::2], lp[1::2]):
            assert t1 - t0 >= delta
            if t0 not in discretized_event_times:
                # Snap t0 forward to the next discretized event time.
                t0 = discretized_event_times[discretized_event_times.bisect(t0)]
            if t1 not in discretized_event_times:
                # Snap t1 back to the previous discretized event time.
                t1 = discretized_event_times[discretized_event_times.bisect(t1) - 1]
            new_E[l] += [t0, t1]
    return new_W, new_E
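A toy run with two nodes and one link (interval lists are flat [t0, t1, ...] as above):

W = {"u": [0.0, 10.0], "v": [1.0, 9.0]}
E = {("u", "v"): [2.0, 8.0]}
new_W, new_E = approximate_events(W, E, delta=2.0)
print(new_W)  # {'u': [0.0, 10.0], 'v': [2.0, 8.0]}
print(new_E)  # {('u', 'v'): [2.0, 8.0]}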