Code example #1
    def from_message(cls, message, measure):
        pairs = [''.join(T) for T in zip(message[::2], message[1::2])]

        measure_split = len(pairs)
        enumerated_pairs = zip(iter_count(), pairs)

        result = [
            cls(datum, measure, position, measure_split)
            for position, datum in enumerated_pairs if datum != '00'
        ]

        return result
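Across these listings, iter_count is presumably itertools.count imported under an alias; the scraped snippets never show the import. Under that assumption, the zip(iter_count(), pairs) call above is just a spelled-out enumerate(pairs), as this minimal sketch illustrates:

from itertools import count as iter_count

pairs = ['ab', '00', 'cd']
assert list(zip(iter_count(), pairs)) == list(enumerate(pairs))
# Both produce [(0, 'ab'), (1, '00'), (2, 'cd')]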
Code example #2
    def run(self):
        count = iter_count(1)
        self.__info(str(next(count)) + ' Prepare Data')
        self.Data = Data(file=self.File,
                         max_len=self.RnnMaxLen,
                         verbose=self.Verbose,
                         divide=self.Divide)

        self.__info(str(next(count)) + ' Build RNN Model')
        self.Rnn = RNN(max_len=self.RnnMaxLen, max_features=100000)

        self.__info(str(next(count)) + ' Build CNN Model')
        self.Cnn = CNN(side=self.Data.CnnSide)

        self.__info(str(next(count)) + ' Run RNN Model')
        self.RnnHistory = self.Rnn.fit(x=self.Data.RnnX,
                                       y=self.Data.RnnY,
                                       batch_size=self.RnnBatch,
                                       epochs=self.RnnEpoch,
                                       validation_split=0.2,
                                       verbose=self.Verbose)

        self.__info(str(next(count)) + ' Run CNN Model')
        self.CnnHistory = self.Cnn.fit(x=self.Data.CnnX,
                                       y=self.Data.CnnY,
                                       batch_size=self.CnnBatch,
                                       epochs=self.CnnEpoch,
                                       validation_split=0.2,
                                       verbose=self.Verbose)

        self.__info(str(next(count)) + ' Predict')
        self.PredictX = [814219, 42159, 141318, 48937, 248414]
        self.PredictY = []
        for item in self.PredictX:
            # Add a batch axis so predict() receives shape (1, ...);
            # 'sample' avoids shadowing the built-in input().
            sample = np.expand_dims(np.array(self.Data.CnnX[item]), axis=0)
            self.PredictY.append(self.Cnn.predict(sample))

        self.__info(str(next(count)) + ' Make Plot')
        # self.__make_plot()

        self.__info(
            str(next(count)) + ' Save History and Configuration as HTML')
        self.__save()

        self.__info('Done')
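The run() method above uses iter_count(1) purely as a step counter for its log messages. A minimal sketch of that idiom, again assuming iter_count is itertools.count:

from itertools import count as iter_count

count = iter_count(1)
for step in ('Prepare Data', 'Build Model', 'Predict'):
    print(f"{next(count)} {step}")
# 1 Prepare Data
# 2 Build Model
# 3 Predict

Because next(count) hands out the next number on demand, steps can be added, removed, or reordered without renumbering the messages by hand.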
Code example #3
def _basis_search(equiv_lib, source_basis, target_basis, heuristic):
    """Search for a set of transformations from source_basis to target_basis.

    Args:
        equiv_lib (EquivalenceLibrary): Source of valid translations
        source_basis (Set[Tuple[gate_name: str, gate_num_qubits: int]]): Starting basis.
        target_basis (Set[gate_name: str]): Target basis.
        heuristic (Callable[[source_basis, target_basis], int]): distance heuristic.

    Returns:
        Optional[List[Tuple[gate_name, gate_num_qubits, params, equiv_circuit]]]: List of
            (gate_name, gate_num_qubits, params, equiv_circuit) tuples which, if applied
            in order, will map from source_basis to target_basis. Returns None if no
            path was found.
    """

    source_basis = frozenset(source_basis)
    target_basis = frozenset(target_basis)

    open_set = set()  # Bases found but not yet inspected.
    closed_set = set()  # Bases found and inspected.

    # Priority queue for inspection order of open_set. Contains Tuple[priority, count, basis]
    open_heap = []

    # Map from bases in closed_set to predecessor with lowest cost_from_source.
    # Values are Tuple[prev_basis, gate_name, params, circuit].
    came_from = {}

    basis_count = iter_count()  # Used to break ties in priority.

    open_set.add(source_basis)
    heappush(open_heap, (0, next(basis_count), source_basis))

    # Map from basis to lowest found cost from source.
    cost_from_source = defaultdict(lambda: np.inf)
    cost_from_source[source_basis] = 0

    # Map from basis to cost_from_source + heuristic.
    est_total_cost = defaultdict(lambda: np.inf)
    est_total_cost[source_basis] = heuristic(source_basis, target_basis)

    logger.debug("Begining basis search from %s to %s.", source_basis, target_basis)

    while open_set:
        _, _, current_basis = heappop(open_heap)

        if current_basis in closed_set:
            # When we close a node, we don't remove it from the heap,
            # so skip here.
            continue

        if {gate_name for gate_name, gate_num_qubits in current_basis}.issubset(target_basis):
            # Found target basis. Construct transform path.
            rtn = []
            last_basis = current_basis
            while last_basis != source_basis:
                prev_basis, gate_name, gate_num_qubits, params, equiv = came_from[last_basis]

                rtn.append((gate_name, gate_num_qubits, params, equiv))
                last_basis = prev_basis
            rtn.reverse()

            logger.debug("Transformation path:")
            for gate_name, gate_num_qubits, params, equiv in rtn:
                logger.debug("%s/%s => %s\n%s", gate_name, gate_num_qubits, params, equiv)
            return rtn

        logger.debug("Inspecting basis %s.", current_basis)
        open_set.remove(current_basis)
        closed_set.add(current_basis)

        for gate_name, gate_num_qubits in current_basis:
            if gate_name in target_basis:
                continue

            equivs = equiv_lib._get_equivalences((gate_name, gate_num_qubits))

            basis_remain = current_basis - {(gate_name, gate_num_qubits)}
            neighbors = [
                (
                    frozenset(
                        basis_remain
                        | {(inst.name, inst.num_qubits) for inst, qargs, cargs in equiv.data}
                    ),
                    params,
                    equiv,
                )
                for params, equiv in equivs
            ]

            # Weight total path length of transformation weakly.
            tentative_cost_from_source = cost_from_source[current_basis] + 1e-3

            for neighbor, params, equiv in neighbors:
                if neighbor in closed_set:
                    continue

                if tentative_cost_from_source >= cost_from_source[neighbor]:
                    continue

                open_set.add(neighbor)
                came_from[neighbor] = (current_basis, gate_name, gate_num_qubits, params, equiv)
                cost_from_source[neighbor] = tentative_cost_from_source
                est_total_cost[neighbor] = tentative_cost_from_source + heuristic(
                    neighbor, target_basis
                )
                heappush(open_heap, (est_total_cost[neighbor], next(basis_count), neighbor))

    return None
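The docstring above only states the heuristic's signature, Callable[[source_basis, target_basis], int]. One plausible, purely hypothetical heuristic consistent with that signature counts the gates in the current basis whose names are not yet in the target basis, which biases the A*-style search toward bases that need fewer further translations:

def basis_heuristic(basis, target_basis):
    # basis is a set of (gate_name, num_qubits) pairs and target_basis a set of
    # gate names, mirroring the argument types used by _basis_search above.
    return len({gate_name for gate_name, _ in basis} - target_basis)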
Code example #4
File: cdoc.py  Project: matt-hayden/cutils
def document(infolder, outfolder, extension, loader, external_css=None,
             generate_toc=None, overwrite=False):
    # Get previously generated TOC object
    TOC = os_path_join(infolder, '.cdoc_toc')
    try:
        with open(TOC, 'rb') as file:
            old_toc = pickle_load(file)
    except (FileNotFoundError, EOFError):
        old_toc = table_Dict2D(OrderedDict)

    # Create new TOC object
    new_toc = table_Dict2D(OrderedDict)

    # TODO: do we really need a separate OrderedDict for pages ???
    pages = OrderedDict()
    anonym = iter_count()

    # TODO: Create real dependency graphs
    #       Document object:
    #           parents  = set()  # other documents depending on this document
    #           children = set()  # other documents this document depending on
    #
    #       If document changed:
    #           set all parents of document => changed
    #
    #       If any of its children changed:
    #           set all parents of child => changed
    #
    #       -- The loop should check if a document's change flag has already
    #          been set. If not, hash file, and set flag, and notify all
    #          dependencies (parents)

    # Load all pages
    with check_Checker(infolder, file='.cdoc_cache', lazy_update=True) as checker:
        # Go through all files
        for file in os_listdir(infolder):
            # If file has the proper extension
            if file.endswith(extension):
                # Create full file path
                filepath = os_path_join(infolder, file)
                # If file has been changed since last check
                if checker.ischanged(filepath) and not overwrite:
                    # Regenerate file
                    filename, pagename, depends = \
                        _process(infolder, file, filepath, pages, loader, anonym)
                # If file hasn't been changed
                else:
                    # If file has been cached before
                    try:
                        # Get previous infos
                        filename, depends = old_toc[filepath]
                        pagename = old_toc.otherkey(filepath)
                        pages[pagename] = None
                        # If any of the dependencies has changed
                        for dependency in depends:
                            if checker.ischanged(dependency) and not overwrite:
                                # Regenerate file
                                filename, pagename, depends = \
                                    _process(infolder, file, filepath, pages, loader, anonym)
                                break
                    # If file is new and hasn't been cached before
                    except KeyError:
                        # Generate it for the first time
                        filename, pagename, depends = \
                            _process(infolder, file, filepath, pages, loader, anonym)
                # Store new values
                new_toc[pagename:filepath] = filename, depends

    # If order changing, renaming, inserting, deleting, etc. happened
    if set(old_toc) - set(new_toc):
        for pagename, filepath in new_toc.keys():
            if pages[pagename] is None:
                # Pass infolder as well, matching the other _process() call sites.
                _process(infolder, os_path_basename(filepath), filepath, pages, loader, anonym)

    # Write back TOC object
    with open(TOC, 'wb') as file:
        pickle_dump(new_toc, file, pickle_HIGHEST_PROTOCOL)
    # Generate Table of Content?
    if generate_toc is None:
        generate_toc = len(new_toc) > 1
    # Create documents
    _build(pages, outfolder, generate_toc, new_toc, external_css)
Code example #5
    def fit(self, X: pd.DataFrame, y):
        nfolds, n_repeats = self.num_cv_folds, self.num_cv_repeats
        kfolds = RepeatedStratifiedKFold(n_splits=nfolds, n_repeats=n_repeats)

        clf_args = dict(n_estimators=self.num_estimators,
                        n_jobs=-1,
                        precision_min=self.precision_min,
                        recall_min=self.recall_min,
                        max_depth_duplication=2,
                        max_depth=(1, 2, 3),
                        max_samples=0.8,
                        feature_names=X.columns)

        rules_per_fold = list()

        for nfold, (idx_train, idx_test) in enumerate(kfolds.split(X, y), 0):
            #print(f"Training on fold {nfold}")
            repeat, fold = 1 + nfold // nfolds, (nfold % nfolds) + 1
            print(f"Training on fold {fold} in repeat {repeat}: ", end="")

            #### obtain current training and test sets
            X_train, y_train = smote(X.iloc[idx_train], y[idx_train])
            X_test, y_test = X.iloc[idx_test], y[idx_test]

            #fit classifier
            clf = DualSkoper(**clf_args).fit(X_train, y_train)
            print("fitted!", flush=True)

            #get some performance stats
            y_pred = clf.predict(X_test)
            report, class_0, class_1 = fix_report(y_test, y_pred)

            self.reports.append(
                dict(n_fold=nfold,
                     **class_0,
                     **class_1,
                     accuracy=report.get("accuracy"),
                     cohen_k=cohen_kappa_score(y_test, y_pred)))

            #extract rules from classifier
            rule_set = list()

            for num_rules in iter_count(start=0, step=1):
                # prediction = clf.predict_top_rules(X_test, num_rules)
                # report, class_0, class_1 = fix_report(y_test, prediction)

                pos, neg = rule_pair = clf.get_ith_rule(num_rules)
                if not pos or not neg:
                    break
                rule_set.append(rule_pair)

                # self.rules_performances.append(dict(
                #     n_rules = num_rules,
                #     n_fold = nfold,
                #     **class_0,
                #     **class_1,
                #     accuracy = report.get("accuracy"),
                #     cohen_k = cohen_kappa_score(y_test, prediction),
                #     rule_pos = pos,
                #     rule_neg = neg
                # ))

            #add all discovered rules to the rule set
            self.ruleset.extend(rule_set)

            #validate rules against test set (filter those rules that don't perform well on the test set)
            # rules_per_fold.append(self.__validate(rule_set, X_test, y_test))
            # nrules = sum([len(x) for x in rules_per_fold])

            # print(f"Ending training on {repeat}-th {fold} fold: {nrules} rule{'' if nrules < 2 else 's'} selected so far.")

            # print(rules_per_fold[-1])

        if False:
            print("rules_per_fold content:\n")
            for nnn, content in enumerate(rules_per_fold, 1):
                print(f"Fold #{nnn}")
                for rule in content:
                    print(rule)
                print()
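The inner loop above uses iter_count(start=0, step=1) as an unbounded index and breaks once clf.get_ith_rule() stops returning a complete pair. A standalone sketch of that count-until-sentinel pattern, with a hypothetical items list standing in for the classifier:

from itertools import count as iter_count

items = ['rule-0', 'rule-1', 'rule-2', None]   # None plays the sentinel role
collected = []
for i in iter_count(start=0, step=1):
    item = items[i]
    if not item:
        break
    collected.append(item)
assert collected == ['rule-0', 'rule-1', 'rule-2']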
Code example #6
class Node:
    ''' This class represents a node in a topology search graph. Each node has
    a unique id, a collection of sinks (stored as a dict keyed by sink name),
    and the bounding box of its movable region.
    '''
    id_generator = iter_count(0)

    def __init__(self, sink_set, llx=None, urx=None, lly=None, ury=None):
        self.sink_dict = {s.name: s for s in sink_set}

        self.llx, self.urx = llx, urx
        self.lly, self.ury = lly, ury

        self.id = next(Node.id_generator)

    def add_sink(self, s):
        ''' Add a sink to the sink_dict. '''
        self.sink_dict[s.name] = s

    def remove_sinks(self, sinks):
        ''' Remove sinks from the node. '''
        for s in sinks:
            if s.name in self.sink_dict:
                del self.sink_dict[s.name]
            else:
                pass  # the sink is not included in this node.

    def remove_sinks_by_name_set(self, name_set):
        ''' Remove sinks (given by a set of sink names) from the node. '''
        for name in name_set:
            if name in self.sink_dict:
                del self.sink_dict[name]
            else:
                pass  # the sink is not included in this node.

    def update_movable_region(self, source):
        ''' Update movable region of this node. '''
        llx_list, lly_list = [source.llx], [source.lly]
        urx_list, ury_list = [source.urx], [source.ury]

        llx_list.extend([s.llx for s in self.sink_dict.values()])
        lly_list.extend([s.lly for s in self.sink_dict.values()])
        urx_list.extend([s.urx for s in self.sink_dict.values()])
        ury_list.extend([s.ury for s in self.sink_dict.values()])

        self.llx, self.lly = max(llx_list), max(lly_list)
        self.urx, self.ury = min(urx_list), min(ury_list)

    @property
    def area(self):
        ''' Area of the movable region defined for this Node. '''
        try:
            return (self.urx - self.llx) * (self.ury - self.lly)
        except TypeError:
            return 0

    @property
    def sink_name_list(self):
        return sorted(list(self.sink_dict.keys()))

    @property
    def sink_set(self):
        return set(self.sink_dict.values())

    @property
    def name(self):
        return '&&'.join(self.sink_name_list)

    def __repr__(self):
        try:
            return "Node(name=%r, box={(%r,%r), (%r,%r)}, area=%r" \
                    % (self.name, self.llx, self.lly, self.urx, self.ury, self.area)
        except TypeError:
            return "Node(name=%r)" % (self.name)

    __str__ = __repr__

    def __hash__(self):
        return hash(self.name)

    def __eq__(self, other):
        return self.__hash__() == other.__hash__()

    def __lt__(self, other):
        return self.area < other.area

    def __len__(self):
        return len(self.sink_dict)

    def __iter__(self):
        # return iter(self.sink_dict.items())
        return iter(self.sink_dict.values())
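Node above draws its id from a class-level iter_count(0), so every instance receives the next integer in sequence. A minimal sketch of that pattern, assuming iter_count is itertools.count:

from itertools import count as iter_count

class Tagged:
    _ids = iter_count(0)          # one counter shared by all instances

    def __init__(self):
        self.id = next(Tagged._ids)

a, b = Tagged(), Tagged()
assert (a.id, b.id) == (0, 1)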