Example #1
import itertools

from more_itertools import chunked, interleave_longest, intersperse


def c_layout(i, definition, template):
    # `layer_names`, `d`, `uni`, `top`, `mid`, `bottom` and `layer_name` are
    # module-level names in the original source.
    c_name = layer_names[i]
    pretty_name = c_name.strip('_').capitalize()
    layout = d['layout']

    # Box-drawing picture of the layer: wrap every key cap in '│' separators,
    # 12 keys per row, with a `mid` rule between rows.
    surround = lambda s: ''.join(interleave_longest(['│'] * (len(s) + 1), s))
    layer = list(map(uni, definition))
    layer[41] = layer[41].center(11)
    layer = chunked(layer, 12)
    rows = intersperse(mid, map(surround, layer))
    pretty = '\n'.join(itertools.chain([top], rows, [bottom]))

    # The same keys again, as a C array literal with 12 keycodes per line.
    surround = lambda s: ', '.join(s)
    layer = list(map(lambda k: layer_name.get(k, k), definition))
    layer = chunked(layer, 12)
    rows = map(surround, layer)
    c_layer = ',\n    '.join(itertools.chain([], rows, []))

    return template.format(pretty_name, pretty, c_name, layout, c_layer)
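Every example on this page relies on the same primitive: interleave_longest yields items from each of its iterables in round-robin order until the longest one is exhausted. A minimal, self-contained illustration (the values are made up):

from more_itertools import interleave_longest

# Shorter iterables simply drop out of the rotation once exhausted.
print(list(interleave_longest([1, 2, 3], [4, 5], [6])))
# [1, 4, 6, 2, 5, 3]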
Example #2
def compile(self):
    # Build the QuantumCircuit object.
    # (Assumes `import qiskit as qs` and more_itertools' interleave_longest
    # at module scope in the original source.)
    self.results = []
    self.snapshots = []
    for x in range(len(self.tracks[0].input)):
        gate_series = []
        simulator = qs.Aer.get_backend("statevector_simulator")
        simulator.set_options(device='GPU')
        qr = qs.QuantumRegister(len(self.tracks))
        cr = qs.ClassicalRegister(len(self.tracks))
        qc = qs.QuantumCircuit(qr, cr)
        for qubit_index in range(len(self.tracks)):
            gate_series.append(self.tracks[qubit_index].gates)
            qc.initialize(self.tracks[qubit_index].input[x].state,
                          qr[qubit_index])
        # Take one gate from each track in turn until every track is drained.
        for gate in list(interleave_longest(*gate_series)):
            gate.qiskit_equivalent_dispatcher(qc)
        qc.snapshot("final state")
        self.results.append(qs.execute(qc, backend=simulator).result())
        self.snapshots.append(self.results[x].data()["snapshots"]
                              ["statevector"]["final state"][0])
def robust_qmsg(parent=None, msg_type="warning", title: str = "", body: Union[str, List[str]] = "",
                variables: Union[str, List[str]] = None) -> None:
    """
    Convenience function for printing out a QMessageBox, optionally with variables interleaved with strings to produce
    a conherent final message.

    Parameters
        • parent:  the parent to assign this QMessageBox to
        • msg_type: a string denoting the type of QMessageBox that should be displayed. One of "warning" (default),
        "information", or "critical"
        • title: a string denoting the title of the QMessageBox
        • body: a string or list of strings denoting the content of the QMessageBox. If a list of strings, the function
        expects variables to also be a list of strings which will interleave with body to form the final message.
        • variables: a string or list of strings denoting the additional variables to interleave into the main
        messages's content

    Returns
        • None


    """
    msg_box = QMessageBox()
    if system() == "Darwin":
        msg_box.setDefaultButton(QMessageBox.Ok)
    if parent is None:
        parent = QWidget()
    if isinstance(body, list) and isinstance(variables, list):
        content = "".join(interleave_longest(body, variables))
    elif isinstance(body, str) and isinstance(variables, str):
        content = body + variables
    elif isinstance(body, str) and isinstance(variables, list):
        content = body + "\n".join(variables)
    elif isinstance(body, str) and variables is None:
        content = body
    else:
        raise ValueError(f"{robust_qmsg.__name__} received incompatible arguments. body was of type {type(body)} and "
                         f"variables was of type {type(variables)}")

    getattr(msg_box, msg_type)(parent, title, content, QMessageBox.Ok)
    return
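In the list/list branch, body fragments and variables alternate via interleave_longest to assemble the message. A hypothetical call (the file name and reason are made up):

robust_qmsg(msg_type="information", title="Load failed",
            body=["Could not open ", " because ", "."],
            variables=["data.csv", "the file is locked"])
# content becomes "Could not open data.csv because the file is locked."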
Example #4
def flatten_iterleave_approach(self):
    # This works fine...
    self.flat_list = list(
        more_itertools.interleave_longest(*self.iterable_list))
    print(self.flat_list)
Example #5
def generate_p_list(self):
    return list(
        more_itertools.interleave_longest(self.p_list_pulses,
                                          self.p_list_tau))
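This alternates pulse entries with the delays between them. A minimal sketch, assuming p_list_pulses and p_list_tau are plain lists (the labels below are made up):

from more_itertools import interleave_longest

pulses = ['pi/2', 'pi', 'pi/2']  # hypothetical pulse labels
taus = ['tau', 'tau']            # hypothetical inter-pulse delays
print(list(interleave_longest(pulses, taus)))
# ['pi/2', 'tau', 'pi', 'tau', 'pi/2']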
Example #6
    def run(self):
        from more_itertools import interleave_longest

        protocol = ProtocolNode()

        if not self.content and not self.arguments:
            self.state.inliner.reporter.warning("Empty protocol.")

        def split_arguments():
            import shlex
            if not self.arguments:
                return
            for path in shlex.split(self.arguments[0]):
                yield Path(path)

        def split_content():
            content_blocks = []

            # Use slicing to split the blocks, because this automatically makes
            # properly configured docutils.statemachine.StringList views.
            i = 0
            for j, line in enumerate(self.content):
                if line.strip() == '***':
                    content_blocks += [self.content[i:j]]
                    i = j + 1

            content_blocks += [self.content[i:]]
            return content_blocks

        def attach_literal_node(path):
            from sphinx.directives.code import LiteralIncludeReader
            from sphinx.util.nodes import set_source_info
            nonlocal protocol

            if path.suffix == '.txt':
                # <literal_block highlight_args="{'linenostart': 1}"
                # linenos="False"
                # source="/home/kale/research/projects/201904_bind_dna/notebook/20190604_dnase_pick_qpcr_primers/20190604_pcr.txt"
                # xml:space="preserve">
                #     ...

                # From `sphinx/directives/code.py`:
                env = self.state.document.settings.env
                location = self.state_machine.get_source_and_line(self.lineno)
                rel_filename, filename = env.relfn2path(str(path))
                env.note_dependency(rel_filename)

                reader = LiteralIncludeReader(filename, self.options,
                                              env.config)
                text, lines = reader.read(location=location)

                literal_node = nodes.literal_block(text, text, source=filename)
                set_source_info(self, literal_node)

                protocol += [literal_node]

            else:
                from sphinx.roles import specific_docroles
                protocol += specific_docroles['download'](
                    'download',
                    rawtext=str(path),
                    text=str(path),
                    lineno=self.lineno,
                    inliner=self.state.inliner,
                )[0]

        def attach_content_node(content):
            if content:
                self.state.nested_parse(content, content.offset(0), protocol)

        content = [(attach_content_node, x) for x in split_content()]
        literal = [(attach_literal_node, x) for x in split_arguments()]

        for add_to_protocol, *args in interleave_longest(content, literal):
            add_to_protocol(*args)

        paragraph = nodes.paragraph()
        paragraph += protocol
        return [paragraph]
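interleave_longest here weaves the parsed text blocks (split on '***') between the file paths given as arguments, so prose and file listings alternate in the rendered protocol. A standalone sketch of the ordering (the block and path values are made up):

from more_itertools import interleave_longest

content_blocks = ['intro prose', 'closing prose']  # hypothetical parsed blocks
paths = ['20190604_pcr.txt']                       # hypothetical argument path
print(list(interleave_longest(content_blocks, paths)))
# ['intro prose', '20190604_pcr.txt', 'closing prose']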
Example #7
    def _read(self, file_path):

        loop = asyncio.get_event_loop()
        dataset_db = loop.run_until_complete(
            create_dataset_db(
                dataset_path=self._dataset_path,
                db_discriminator=self._db_discriminator,
                should_save_sentiment=self._save_sentiment,
                file_path=file_path,
                use_existing_database=self._use_existing_cached_db,
                ner_model=self._ner_model,
                coreference_model=self._coreference_model,
                truncate_sequence_length=self._truncate_sequence_length,
                cuda_device=self._cuda_device))

        db = dataset.connect(dataset_db, engine_kwargs={"pool_recycle": 3600})

        negative_sampler = negative_sentence_sampler(db)

        # If interleaving, it is best to sort batches of stories by length so there are fewer leftover sentences.
        if self._interleave_story_sentences:
            order_story = "sentence_num"
        else:
            order_story = "id"
        stories = db.query(
            f'SELECT * FROM story  WHERE sentence_num >= {self._min_story_sentences} '
            f'AND sentence_num <= {self._max_story_sentences} ORDER BY {order_story}'
        )

        chunked_stories = more_itertools.chunked(stories, self._story_chunking)

        for chunks in chunked_stories:
            chunk_instances = []
            for story in chunks:

                story_instances = []

                story_id = story["id"]

                # Id will be the same as the sentence num as they are inserted as a batch in sequence.
                sentences = [
                    s for s in db.query(
                        f'SELECT * FROM sentence WHERE story_id = {story_id} ORDER BY id'
                    )
                ]
                if len(sentences) == 0:
                    logging.warning(f"Story has no sentences: {story_id}")
                    continue

                if self._named_entity_embeddings:
                    self.encode_named_entities(story_id, sentences, db)

                sentence_nums = [s["sentence_num"] for s in sentences]
                for source_indices, target_indices, absolute_position, relative_position in dual_window(
                        sentence_nums,
                        context_size=self._sentence_context_window,
                        predictive_size=self._sentence_predictive_window,
                        num_of_sentences=story["sentence_num"]):

                    source_sequence = [
                        sentences[i] for i in source_indices if i is not None
                    ]
                    target_sequence = [
                        sentences[i] for i in target_indices if i is not None
                    ]

                    # Initialise outside the branch so the length check below
                    # cannot hit an unbound name when _target_negative is False.
                    negative_sequence = []
                    if self._target_negative:
                        for i in range(self._sentence_predictive_window):
                            sentence = next(negative_sampler)
                            negative_sequence.append(sentence)

                        if self._named_entity_embeddings:
                            self.encode_named_entities(story_id,
                                                       negative_sequence, db)

                    metadata = {
                        "story_id": story_id,
                        "absolute_position": absolute_position,
                        "relative_position": relative_position,
                        "number_of_sentences": story["sentence_num"]
                    }

                    if len(source_sequence) == 0 or (len(target_sequence) == 0
                                                     and len(negative_sequence)
                                                     == 0):
                        continue

                    story_instances.append((source_sequence, target_sequence,
                                            negative_sequence, metadata))

                chunk_instances.append(story_instances)

            if not self._interleave_story_sentences:
                # Just flatten in the normal order.
                sorted_instances = more_itertools.flatten(chunk_instances)
            else:

                # Reorder the sentences so one sentence per batch is in sentence order.
                sorted_instances = more_itertools.interleave_longest(
                    *chunk_instances)

            for instance in sorted_instances:
                yield self.text_to_instance(instance[0], instance[1],
                                            instance[2], instance[3])
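The chunk-then-interleave pattern yields one instance per story in turn, so neighbouring items in a batch come from different stories while each story's sentences stay in order. A minimal standalone sketch with made-up per-story instances:

from more_itertools import interleave_longest

chunk_instances = [['a1', 'a2', 'a3'], ['b1', 'b2'], ['c1']]  # hypothetical instances per story
print(list(interleave_longest(*chunk_instances)))
# ['a1', 'b1', 'c1', 'a2', 'b2', 'a3']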