def solve_binary_pick_k(self, show_spent_time: bool = True) -> list:
    """
    Using a binary-search-style divide and conquer, pick the single
    top-ranked article out of the N posts in post_list, and repeat k times.
    Time complexity: O(k log N)
    Space complexity: O(N)
    Returns:
        [list]: Top-K articles
    """
    if show_spent_time:
        start = time.time()
    top_k_posts_list = []
    n_picks = 0
    post_set = IndexedSet(self.post_list)
    # Loop that repeats k times: O(k)
    while n_picks < self.top_k:
        # Binary pick via divide and conquer: O(log N)
        curr_best_post = binary_pick(
            self.ranker,
            post_set, left=0,
            right=len(post_set) - 1)
        top_k_posts_list.append(curr_best_post)  # appending to a list is O(1)
        # Removing an element from an IndexedSet is O(1) (hash table)
        post_set.remove(curr_best_post)
        n_picks += 1
    if show_spent_time:
        spent = (time.time() - start) * 1000
        print(self)
        print(f'spent time {spent:.2f} ms')
    return top_k_posts_list
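The binary_pick helper is not shown in this example. A minimal tournament-style sketch consistent with the call above might look like the following; ranker.compare is a hypothetical method assumed to return the higher-ranked of two posts.

# Hypothetical sketch of the binary_pick helper used above -- not part of
# the original example. Divide and conquer: pick the best post of each half,
# then let the ranker decide between the two winners.
def binary_pick(ranker, post_set, left, right):
    if left == right:
        return post_set[left]  # IndexedSet supports positional indexing
    mid = (left + right) // 2
    best_left = binary_pick(ranker, post_set, left, mid)
    best_right = binary_pick(ranker, post_set, mid + 1, right)
    return ranker.compare(best_left, best_right)  # assumed API: returns the better post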
Example No. 2
def find_instruction_order(instructions: OrderedMultiDict):
    starting_instruction = set(instructions.keys(multi=True)).difference(
        set(instructions.values(multi=True)))
    worker_manager = WorkerManager(offset=0)
    instruction_order = IndexedSet()
    current_instructions = list()
    for value in sorted(starting_instruction):
        current_instructions.append(value)
        worker_manager.add(value)
    current_instructions = sorted(current_instructions)
    while current_instructions:
        index = -1
        for i, current_instruction in enumerate(current_instructions):
            if not {
                    key
                    for key, value in instructions.items(multi=True)
                    if value == current_instruction
            }.difference(instruction_order):
                instruction_order.add(current_instruction)
                worker_manager.remove(current_instruction)
                for value in instructions.getlist(current_instruction):
                    if value not in current_instructions:
                        current_instructions.append(value)
                index = i
                break
        if index != -1:
            current_instructions.pop(index)
        current_instructions = sorted(current_instructions)
        for instruction in current_instructions:
            worker_manager.add(instruction)
    print("".join(instruction_order))
    print(worker_manager.start_time)
Example No. 3
    def __init__(self, code: typing.List[str], filename: str,
                 primary_function: str):
        self.code = code
        self._generated_code = []
        self._node_num = -1
        self._reset = False
        self.filename = filename
        self.shader_name = self.filename.split("/")[-1].split(".")[0]
        self.functions = []
        self.imports = IndexedSet()
        self.connected_code = IndexedSet()
        self.parse_time = 0
        self._primary_func_name = primary_function
        self.primary_function: GLSLFunction = None
        self._uniforms = []

        self._arguments_to_add_to_primary = []

        # Track line numbers of important parts of code
        self._functions_start_line = -1
        self._primary_func_start_line = -1

        self._parse()
        self._set_primary_function()
        self._add_args_to_primary()
Example No. 4
def test_indexed_set_mutate():
    thou = IndexedSet(range(1000))
    assert (thou.pop(), thou.pop()) == (999, 998)
    assert (thou.pop(499), thou.pop(499)) == (499, 500)

    ref = [495, 496, 497, 498, 501, 502, 503, 504, 505, 506]
    assert [thou[i] for i in range(495, 505)] == ref

    assert len(thou) == 996
    while len(thou) > 600:
        dead_idx_len = len(thou.dead_indices)
        dead_idx_count = thou._dead_index_count
        thou.pop(0)
        new_dead_idx_len = len(thou.dead_indices)
        if new_dead_idx_len < dead_idx_len:
            assert dead_idx_count > 0
            # 124, 109, 95
    assert len(thou) == 600
    assert thou._dead_index_count == 67

    assert not any(thou[i] is _MISSING for i in range(len(thou)))

    thou &= IndexedSet(range(500, 503))

    assert thou == IndexedSet([501, 502])
Example No. 5
    def __init__(self, parent_node: 'GShaderNode', socket: NodeSocket):
        super().__init__(parent=parent_node)
        self._socket = socket
        self._socket.set_container(self)
        self._parent_g_node = parent_node
        self._connected_g_edges = IndexedSet()

        # Define socket properties
        is_input = self._socket.type() == NodeSocket.INPUT
        self._circle_connected_brush = (QColor(255, 130, 0, 255) if is_input
                                        else QColor(130, 255, 0, 255))
        self._circle_disconnected_brush = (QColor(102, 50, 0, 255) if is_input
                                           else QColor(50, 102, 0, 255))
        self._circle_hover_brush = (QColor(170, 130, 0, 255) if is_input
                                    else QColor(130, 170, 0, 255))
        self._border_connected_brush = QPen(QColor(255, 255, 255, 255))
        self._border_disconnected_brush = QPen(Qt.black)
        self._circle_brush = self._circle_disconnected_brush
        self._border_brush = self._border_disconnected_brush
        self._bbox = QRectF(0, 0, 10, 10)
        self._moving_edge = False
        self._current_edge = None
        self._layout = QGraphicsLinearLayout(Qt.Horizontal)

        self._init_socket()
Example No. 6
    def __init__(self, logfile, buffs, regex):
        print(regex)
        self.logfile = logfile
        self.bufflist = buffs
        self.rx = re.compile(regex)
        self.windowName = "EverQuest"
        self.buffqueue = IndexedSet()
Example No. 7
    def __init__(self, name="Sub-Topology", parent=None):
        if name is not None:
            self._name = str(name)
        else:  # guard against an explicitly passed None
            self._name = 'Sub-Topology'
        if parent is None:
            self._parent = parent
        else:
            self._parent = _validate_parent(parent)
        self._sites = IndexedSet()
Example No. 8
    def get_output_target_nodes(self) -> IndexedSet:
        """Returns a set of all nodes connected to this nodes output socket."""
        out = IndexedSet()

        for socket in self.get_output_sockets():
            if socket.is_connected():
                out.update(socket.get_connected_nodes())

        return out
Example No. 9
    def get_ancestor_nodes(self, add_self: bool = False) -> IndexedSet:
        """
        Returns a set of all connected ancestors of this node.
        :param add_self: if True, adds this node to the set of returned nodes.
        :return: a Set of nodes that are ancestors of this node.
        """
        nodes = self._node.get_ancestor_nodes(add_self=add_self)
        out = IndexedSet()
        for n in nodes:
            out.add(n.get_container())
        return out
Example No. 10
    async def dist_cards(self, cards: List[Card], player_number: int):
        self.player_number = player_number
        # IndexedSet keeps the cards ordered and supports slicing
        self.hand = IndexedSet(cards)
        self.stash = list()
        self.extplayer.init_player(player_number)
        bigTichu = await self.extplayer.dist_first(self.hand[0:8])
        if bigTichu.event == TEventType.BigTichu:
            await self.server.put(bigTichu)
        smallTichu = await self.extplayer.dist_second(self.hand[8:14])  # cards 9-14
        if smallTichu.event == TEventType.SmallTichu:
            await self.server.put(smallTichu)
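The slicing above works because boltons' IndexedSet preserves insertion order and supports list-style indexing, with slices returning IndexedSets. A quick illustration:

from boltons.setutils import IndexedSet

hand = IndexedSet(['2S', '3H', 'KD', 'AC'])
print(hand[0:2])  # IndexedSet(['2S', '3H'])
print(hand[-1])   # 'AC'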
Example No. 11
    def __init__(self, parent_node: 'Node', socket_type: SocketType,
                 dtype: DataType = None, label: str = "", container=None):
        super().__init__(container)
        assert isinstance(socket_type, SocketType), \
            "socket_type must be of enum type SocketType!"
        self._parent_g_node = parent_node
        self._type = socket_type
        self._dtype = dtype
        self._label = label
        self._id = uuid.uuid4()
        self._index = -1
        self._value = None
        self._saved_value = None
        self._connected = False
        self._connected_sockets = IndexedSet()
        self._connected_edges = IndexedSet()
Example No. 12
def search():
    context = dict()
    try:
        if request.args:
            key_word = request.args["key"]
            df = pd.read_csv(Name_File_Path)
            search_result = []
            # Search progressively shorter prefixes of the keyword
            # (down to three characters), dropping rows already matched.
            for idx in range(len(key_word) - 2):
                search_q = key_word[:len(key_word) - idx]
                r_list, used_word = get_search_result(df, "Name",
                                                      search_q.lower())
                search_result.extend(r_list)
                index = df["Name"][used_word].index
                df = df.drop(index=index)
            result = list(IndexedSet(search_result))
            if not result:
                context["message"] = "No Result Found!"
            else:
                context["search_result"] = result
            return jsonify(context)
        else:
            return render_template("index.html", context=context)
    except Exception as e:
        return render_template("index.html",
                               context={
                                   "error_message": str(e),
                                   "message": "Internal server error"
                               })
Example No. 13
    def __init__(self, expectations=None):
        super(ArsdkMultipleExpectation, self).__init__()
        if expectations is None:
            self.expectations = []
        else:
            self.expectations = expectations
        self.matched_expectations = IndexedSet()
Example No. 14
    def __init__(self, expectations=None):
        super().__init__()
        if expectations is None:
            self.expectations = []
        else:
            self.expectations = expectations
        self.matched_expectations = IndexedSet()
Example No. 15
    def store_selection(self):
        """Call this on "OK" to persist the history of selected paths."""
        print("Storing selection...")
        if self.stkey_last_path:
            sect, key = self.stkey_last_path.split('.')
            store_put(sect, key, self.path)
        if self.stkey_paths_hist:
            sect, key = self.stkey_paths_hist.split('.')
            try:
                paths_hist = IndexedSet(store_get(sect, key, []))
                paths_hist.add(self.path)
                # keep earlier elements at the list head
                paths_hist = list(reversed(paths_hist))
            except Exception:
                paths_hist = [self.path]
            store_put(sect, key, paths_hist)
Example No. 16
def big_popper():
    # more of a benchmark than a test
    from os import urandom
    import time
    big_set = IndexedSet(range(100000))
    # iterating bytes yields ints in Python 3, so no ord() is needed
    rands = list(urandom(len(big_set)))
    start_time, start_size = time.time(), len(big_set)
    while len(big_set) > 10000:
        if len(big_set) % 10000 == 0:
            print(len(big_set) // 10000)
        rand = rands.pop()
        big_set.pop(rand)
        big_set.pop(-rand)
    end_time, end_size = time.time(), len(big_set)
    print()
    print('popped %s items in %s seconds' % (start_size - end_size,
                                             end_time - start_time))
Example No. 17
    def __init__(self, cards: Collection[Card]):
        self.cards: IndexedSet = IndexedSet(cards)
        le = len(cards)
        if le < 5:
            raise ValueError('At least five cards are needed for a '
                             'straight. Only {} were given'.format(le))
        if dog in cards:
            raise ValueError('Dog not usable in Patterns')

        self.cardinality = len(cards)

        # Check consistency of given cards!

        sorted_cards = sorted(cards, key=Card.rank)
        sorted_cards_iter = iter(sorted_cards)
        first_card = next(sorted_cards_iter)
        self.rank = first_card.rank
        phx_avail = has_phoenix(sorted_cards)

        redundant_ranks = []
        self.phx_rank = None

        last_rank = self.rank
        for current_card in sorted_cards_iter:
            if current_card.special == Special.phoenix:
                continue
            diff = current_card.rank - last_rank
            if diff == 0:
                redundant_ranks.append(current_card.rank)
            elif diff == 1:
                last_rank = current_card.rank
            elif diff == 2 and phx_avail:
                phx_avail = False
                last_rank = current_card.rank
            else:
                raise ValueError("Inconsistent Straight")

        # if phx was not used to build the straight
        # it can make the straight longer
        if phx_avail:
            # append phx to the end (if not already ace)
            if last_rank < 14:
                last_rank += 1
                self.phx_rank = last_rank
            elif self.rank > 2:
                self.rank -= 1
                self.phx_rank = self.rank

        self.redundant_cards = []
        self.essential_cards = []
        for c in cards:
            if c.rank in redundant_ranks:
                self.redundant_cards.append(c)
            else:
                self.essential_cards.append(c)
        self.highest = last_rank
        self.cardinality = self.highest - self.rank + 1
Example No. 18
def dedupe(*input_files):
    """ Takes file descriptors and returns deduplicated content. """

    # Parse and merge all files' entries.
    results = chain.from_iterable(map(parse_history, input_files))

    # Deduplicate entries sharing the same timestamp by removing all previous
    # occurrences, only keeping the last one. A reversed IndexedSet lets us keep
    # entries ordered by their encounter. This is important, especially to keep
    # together timestamp-less entries coming from the same file.
    results = IndexedSet(list(results)[::-1])
    results.reverse()

    # Sort entries by timestamps.
    entries = []
    for timestamp, cmd in sorted(results, key=itemgetter(0)):
        entries.append("#{}\n{}".format(timestamp, cmd))

    return '\n'.join(entries)
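The reverse-then-dedupe trick works because IndexedSet keeps only the first occurrence of each element; feeding it the reversed sequence therefore keeps each element's last occurrence. A minimal illustration:

from boltons.setutils import IndexedSet

events = ['a', 'b', 'a', 'c', 'b']
deduped = IndexedSet(events[::-1])  # keeps last occurrences, in reverse order
deduped.reverse()                   # restore the original encounter order
print(list(deduped))                # ['a', 'c', 'b']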
Example No. 19
    def __init__(self,
                 name='Site',
                 position=None,
                 charge=None,
                 mass=None,
                 element=None,
                 atom_type=None):
        if name is not None:
            self.name = str(name)
        else:  # handle an explicitly passed None
            self.name = 'Site'
        if position is None:
            self.position = u.nm * np.zeros(3)
        else:
            self.position = _validate_position(position)

        self._element = element
        self._atom_type = _validate_atom_type(atom_type)
        self._charge = _validate_charge(charge)
        self._mass = _validate_mass(mass)
        self._connections = IndexedSet()
Example No. 20
    def convert_hbih_to_gmusic(self, url):
        hbih_list = HBIHPlaylist(url)
        title = hbih_list.title
        store_ids = []
        for item in hbih_list.items:
            album_store_ids = self.get_best_album_match(item[0], item[1])
            print("Adding store ids: {0}".format(album_store_ids))
            store_ids.extend(album_store_ids)

        store_id_set = IndexedSet(store_ids)
        no_dupes_store_ids = list(store_id_set)
        new_plist = self.create_playlist(title, no_dupes_store_ids[0:1000])
        return self.share_playlist(new_plist)
Example No. 21
    def _consume_next_xlref(self, xlref, lasso):
        """
        :param str xlref:
                an xlref that may not contain hash(`#`); in that case,
                it is taken as *file-part* or as *fragment-part* depending
                on the existence of prev lasso's `url_file`.
        :param Lasso lasso:
                reuses `url_file` & `sheet` if missing from xlref
        """

        xlref = _guess_xlref_without_hash(xlref,
                                          bias_on_fragment=bool(
                                              lasso.url_file))
        lasso = xleash.lasso(xlref,
                             sheets_factory=self._sheets_factory,
                             url_file=lasso.url_file,
                             sheet=lasso.sheet,
                             return_lasso=True)
        values = lasso.values
        if values:  # Skip blank sheets.
            # TODO: Convert column monkeybiz into pure-pandas using xleash.
            str_row_indices = [
                i for i, r in enumerate(values) if any(
                    isinstance(v, str) for v in r)
            ]

            req_labels = IndexedSet(self.required_labels)
            for k in str_row_indices:
                if set(values[k]) >= req_labels:
                    break
            else:
                raise CmdException(
                    "Columns %r not found in table of sheet(%r) in book(%r)!" %
                    (self.required_labels, lasso.sheet._sheet.name,
                     lasso.sheet.book_fname))
            ix = values[k]
            i = max(str_row_indices, default=0) + 1

            h = pd.DataFrame(values[:i], columns=ix)
            self.headers.append((sheet_name(lasso), k, h))

            values = pd.DataFrame(values[i:], columns=ix)
            values.dropna(how='all', inplace=True)
            values.dropna(axis=1, how='any', inplace=True)
            if values.empty:
                log.warning("Empty table of sheet(%r) in book (%r)!" %
                            (lasso.sheet._sheet.name, lasso.sheet.book_fname))
            else:
                self.tables.append(values)

        return lasso
Example No. 22
def paramaterize_cases(
    cases: List[ParamCase],
    default_values: Optional[ParamaterizationVars] = None,
    fill_none: bool = True,
    fill_default: bool = False,
) -> ParamaterizationArgs:
    """Create paramaterization from a set of named cases."""
    _default = default_values or {}

    # Build the list of paramaterized values
    argnames = IndexedSet()
    if fill_default:
        argnames |= IndexedSet(_default.keys())

    # Build list case ids, values, and add any argnames that weren't in default
    ids = []
    case_values = []
    for case in cases:
        ids.append(case.id)
        argnames |= IndexedSet(case.variables.keys())
        case_values.append(case.variables)

    # Convert argnames to a list once we have identified all possibilities
    argnames = list(argnames)

    # Build list of tuples of values
    argvalues = [
        tuple([
            _get_from_lists(var, [values, _default], raise_error=not fill_none)
            for var in argnames
        ]) for values in case_values
    ]

    return ParamaterizationArgs(
        argnames=argnames,
        argvalues=argvalues,
        ids=ids,
    )
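Using an IndexedSet for argnames means the in-place union (|=) appends only unseen names while preserving the order in which they were first encountered, so the resulting argnames list is deterministic. For example:

from boltons.setutils import IndexedSet

argnames = IndexedSet(['a', 'b'])
argnames |= IndexedSet(['b', 'c'])  # 'b' is already present; only 'c' is appended
print(list(argnames))               # ['a', 'b', 'c']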
Example No. 23
    def build_structure(self, decision_table: DecisionTable) -> [str]:
        """
        Builds the meta representation structure based on the dictionary
        representation of the structure.
        :param decision_table:
        :return:
        """
        structure_dict = self.build_structure_dict(decision_table)
        predicate_names = structure_dict.keys()
        structure: [str] = []
        for predicate in predicate_names:
            value_list = structure_dict[predicate]
            # remove duplicates while preserving order
            unique_value_list = list(IndexedSet(value_list))
            values_string = '; '.join(unique_value_list)
            structure.append(predicate + " = {" + values_string + "}")
        return structure
Example No. 24
    def autocomplete(self, request):
        key = request.query_params.get('key', None)
        trie = OurRepo.getInstance().our_trie_root
        comp = trie.all_suggestions(key)
        if comp == 1:
            response = trie.words
            print(' 1: The type of response is: ' + str(type(response)))
            grand_response = []
            for element in response:
                grand_response.append(element.get("word"))

            print(grand_response)
            count = len(response)
            response_ = None
            if len(response) < MAX_COUNT:
                print('Reached here')
                response_ = trie.search_with_typo(2, key)
                response_ = list(dict(response_).keys())

            if response_ is not None:
                for word in response_:
                    if count == MAX_COUNT:
                        break
                    grand_response.append(word)
                    count += 1
        else:
            print("2:: ")
            length = 1
            count = 0
            grand_response = []
            while len(grand_response) < MAX_COUNT:
                response = None
                while response is None:
                    response = trie.search_with_typo(length, key)
                    length += 1

                response = list(dict(response).keys())
                for element in response:
                    if count == MAX_COUNT:
                        break
                    grand_response.append(element)
                    count += 1

            for result in response:
                print(result)
        response__ = list(IndexedSet(grand_response))
        return Response(response__, status=status.HTTP_200_OK)
Example No. 25
def test_indexed_set_basic():
    zero2nine = IndexedSet(range(10))
    five2nine = zero2nine & IndexedSet(range(5, 15))
    x = IndexedSet(five2nine)
    x |= set([10])

    assert list(zero2nine) == [0, 1, 2, 3, 4, 5, 6, 7, 8, 9]
    assert set(zero2nine) == set([0, 1, 2, 3, 4, 5, 6, 7, 8, 9])
    assert list(five2nine) == [5, 6, 7, 8, 9]
    assert x == IndexedSet([5, 6, 7, 8, 9, 10])
    assert x[-1] == 10

    assert zero2nine ^ five2nine == IndexedSet([0, 1, 2, 3, 4])

    assert x[:3] == IndexedSet([5, 6, 7])
    assert x[2:4:-1] == IndexedSet([8, 7])
Example No. 26
    def by_external_id(self, external_id, record_types=None):
        '''Return any resources fetched from the 'by-external-id' route.

        Note: while the route will return differently depending on how many
        records are returned, this method deliberately flattens that out - it
        will _always_ return a generator, even if only one record is found.'''
        params = {"eid": external_id}
        if record_types:
            params['type[]'] = record_types

        res = self.client.get('by-external-id', params=params)
        if res.status_code == 404:
            return  # no matches: the generator simply yields nothing
        elif res.status_code == 300:  # multiple returns, bare list of uris
            yield from (wrap_json_object({"ref": uri}, self.client)
                        for uri in IndexedSet(res.json()))
        elif res.status_code == 200:  # single obj, redirects to obj with 303->200
            yield wrap_json_object(res.json(), self.client)
        else:
            raise ASnakeBadReturnCode(
                "by-external-id call returned '{}'".format(res.status_code))
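Because the method always yields (even for a single record), callers can iterate uniformly no matter how many matches the route found. A usage sketch, assuming repo is an object exposing the method above:

for resource in repo.by_external_id('legacy-id-42', record_types=['resource']):
    print(resource)  # each item is a wrapped JSON object, however many matched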
Example No. 27
    def generate_code(self) -> str:
        if not self._reset:
            _logger.error(
                "generate_code() called before the code has been reset. Call reset() first."
            )
            return ""

        # Step 1, copy all lines from the code up until the first function
        self._generated_code = self.code[0:self._functions_start_line].copy()

        # Step 2, insert uniforms for all connected nodes for each unconnected argument
        for needed_code in self.connected_code:
            needed_code._reset = False
            prim_func = needed_code.get_primary_function()
            for arg in prim_func.arguments:
                if (not arg.is_connected() and arg.name != "frag_pos"
                        and not arg.is_output()):
                    self._generated_code.append(arg.get_uniform_string() + "\n")
                    self._uniforms.append((arg.type, arg.modified_name))

        # Step 3, handle imports
        all_imports = IndexedSet([
            im for n in self.connected_code for im in n.imports
        ])  # Convert to set to remove double imports
        for import_file in all_imports:
            print("Import File: {}".format(import_file))
            libcode = generate_comment_line(
                import_file) + "\n" + get_import_code(import_file)
            self._generated_code.append(libcode + "\n")

        # Step 4, import code from connected nodes
        var_declarations = []
        function_calls = []
        added_function_defs = []
        _add_calls(self, var_declarations, function_calls, added_function_defs,
                   self._generated_code)

        # Step 5, generate calls to connected functions in own primary function
        self.primary_function.add_calls(function_calls, var_declarations)
        primary_code = "".join(self.primary_function.generated_code)
        self._generated_code.append(primary_code)
        self._reset = False
        return "".join(self._generated_code)
Example No. 28
    def get_ancestor_nodes(self, add_self: bool = False) -> IndexedSet:
        """
        Returns a set of all connected ancestors of this node.
        :param add_self: if True, adds this node to the set of returned nodes.
        :return: a Set of nodes that are ancestors of this node.
        """
        out = IndexedSet()
        if add_self:
            out.add(self)

        for socket in self.get_input_sockets():
            if socket.is_connected():
                for node in socket.get_connected_nodes():
                    out.update(node.get_ancestor_nodes(add_self=True))

        return out
Example No. 29
def get_known_casrns(cid):
    """
    Get PubChem's specifically designated CASRNs for a given CID.

    The PubChem API can return a special section of data dedicated to CASRNs.
    This is not (yet) documented in the official API specification.
    This method may need to change as PubChem improves access to CASRNs.

    We compile CASRNs in a container object that preserves their order when
    more are added, as in :func:`get_compound_info`.

    Parameters:
        cid (str or int): PubChem Compound ID.

    Returns:
        :class:`boltons.setutils.IndexedSet`: "Known" CASRNs.
    """
    url = PUG_VIEW_BASE + '/data/compound/{0}/JSON?heading=CAS'.format(cid)
    logger.debug('Getting known CASRNs for CID %s. Request URL: %s', cid, url)
    req = requests.get(url)
    data = req.json()
    casrns = IndexedSet()

    def visit(path, key, value):
        if value['Name'] == 'CAS':
            if casrnutils.validate(value['StringValue'], boolean=True):
                casrns.add(value['StringValue'])
            else:
                logger.debug('Found invalid CASRN [%s] for CID %s',
                             value['StringValue'], cid)
        return False

    try:
        remap(data, visit=visit, reraise_visit=False)
    except LookupError:
        logger.error('Failed to retrieve known CASRNs for CID %s', cid)

    logger.debug('Found %i known CASRNs for CID %s', len(casrns), cid)
    return casrns
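boltons' remap walks every item of the nested JSON and calls visit on each (path, key, value) triple; the snippet above uses the walk purely for its side effect of collecting CASRNs (returning False discards items from the remapped copy, which is never used, and reraise_visit=False keeps malformed entries from aborting the walk). A minimal sketch of the same collect-while-walking pattern, returning True so the structure is left intact:

from boltons.iterutils import remap

data = {'Record': {'Section': [
    {'Name': 'CAS', 'StringValue': '50-00-0'},
    {'Name': 'Other', 'StringValue': 'n/a'},
]}}
found = []

def visit(path, key, value):
    # called for every item in the nested structure
    if isinstance(value, dict) and value.get('Name') == 'CAS':
        found.append(value['StringValue'])
    return True  # keep everything; we only want the side effect

remap(data, visit=visit)
print(found)  # ['50-00-0']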
Example No. 30
    def convert(self,
                decision_tables: [DecisionTable]) -> ([str], [str], [str]):
        vocabulary = self.build_vocabulary()
        theory = self.build_theory()

        structure_dictionaries = [
            self.build_structure_dict(decision_table)
            for decision_table in decision_tables
        ]
        predicate_names = structure_dictionaries[0].keys()
        # Loop over every predicate and only keep unique entries for those predicates
        structure: [str] = []
        for predicate in predicate_names:
            value_list = [
                structure_dictionary[predicate]
                for structure_dictionary in structure_dictionaries
            ]
            flat_value_list = list(itertools.chain.from_iterable(value_list))
            # remove duplicates while preserving order
            unique_value_list = list(IndexedSet(flat_value_list))
            values_string = '; '.join(unique_value_list)
            structure.append(predicate + " = {" + values_string + "}")

        return vocabulary, theory, structure