Example no. 1
    def __init__(self,
                 filename,            # type: str
                 requirements=None,   # type: Optional[InstallReqSet]
                 nested_cfiles=None,  # type: Optional[InstallReqFileSet]
                 nested_rfiles=None,  # type: Optional[InstallReqFileSet]
                 index_urls=None,     # type: Optional[List[str]]
                 ):
        # type: (...) -> None
        """Constructs a new :class:`RequirementFile`.

        Args:
            filename: The path to a requirements file. The requirements
                file is not required to exist.
            requirements: A set of :class:`HashableInstallRequirement`.
                If **filename** points to a path that exists and
                **requirements** are not provided, then the requirements
                will be parsed from the target file.
            nested_cfiles: A set of :class:`RequirementFile`.
            nested_rfiles: A set of :class:`RequirementFile`.
            index_urls: A set of Python package index URLs. The first
                URL is assumed to be the primary index URL, while the
                rest are extra.

        """
        self.filename = pathlib.Path(filename)
        self.requirements = requirements or ordered_set.OrderedSet()
        self.index_urls = ordered_set.OrderedSet(index_urls)
        self.nested_cfiles = nested_cfiles or ordered_set.OrderedSet()
        self.nested_rfiles = nested_rfiles or ordered_set.OrderedSet()

        if requirements is None and self.filename.exists():
            self.reload()
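Both default-handling idioms above tolerate a missing argument: OrderedSet(None) yields an empty set, and the `or` fallback covers None or empty inputs. A minimal standalone sketch (only the ordered_set package is assumed):

import ordered_set

empty = ordered_set.OrderedSet(None)           # the constructor accepts None
fallback = None or ordered_set.OrderedSet()    # the `or` idiom used for requirements
print(len(empty), len(fallback))               # 0 0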
Example no. 2
    def parse(self, *args):
        # type: (*str) -> ParseResultType
        """Parses a requirements file.

        Args:
            *args: Command-line options and arguments passed to pip.

        Returns:
            A set of requirements, index URLs, nested constraint files,
            and nested requirements files.

            The nested constraint and requirements files are sets of
            :class:`RequirementFile` instances.

        """
        self.nested_files = self.parse_nested_files()
        pip_options, session = build_pip_session(*args)
        repository = PyPiRepository(pip_options, session)
        requirements = pip.req.parse_requirements(
            str(self.filename),
            finder=repository.finder,
            session=repository.session,
            options=pip_options)
        requirements = ordered_set.OrderedSet(sorted(
            (HashableInstallRequirement.from_ireq(ireq)
             for ireq in requirements),
            key=lambda ireq: str(ireq)))
        index_urls = ordered_set.OrderedSet(repository.finder.index_urls)
        nested_cfiles, nested_rfiles = self.nested_files  # reuse the result parsed above
        nested_requirements = set(itertools.chain(
            *(requirements_file.requirements
              for requirements_file in nested_rfiles)))
        requirements -= nested_requirements
        return requirements, index_urls, nested_cfiles, nested_rfiles
Example no. 3
    def parse_nested_files(self):
        # type: () -> Tuple[InstallReqFileSet, InstallReqFileSet]
        """Parses a requirements file, looking for nested files.

        Returns:
            A set of constraint files and requirements files.

        """
        nested_cfiles = ordered_set.OrderedSet()
        nested_rfiles = ordered_set.OrderedSet()
        parser = pip.req.req_file.build_parser()
        defaults = parser.get_default_values()
        defaults.index_url = None
        with io.open(str(self.filename), 'r') as f:
            for line in f:
                if line.startswith('#'):
                    continue
                args_str, options_str = pip.req.req_file.break_args_options(
                    line)
                opts, _ = parser.parse_args(shlex.split(options_str), defaults)
                if opts.requirements:
                    filename = self.filename.parent / opts.requirements[0]
                    nested_rfiles.add(self.__class__(str(filename)))
                elif opts.constraints:
                    filename = self.filename.parent / opts.constraints[0]
                    nested_cfiles.add(self.__class__(str(filename)))
        return nested_cfiles, nested_rfiles
Example no. 4
 def __init__(self):
     self.utxo = ordered_set.OrderedSet()
     self.utxo_pool = ordered_set.OrderedSet()
     self.memory_pool = ordered_set.OrderedSet()
     self.chain = []
     self.tx_position = {}
     self.data_position = {}
Example no. 5
    def _generate_square_key(self, key, removed_char):
        alphabet = 'ABCDEFGHIJKLMNOPQRSTUVWXYZ'
        alphabet = alphabet.replace(removed_char, '')
        alphabet = ordered_set.OrderedSet(alphabet)

        preprocessed_key = key.upper()
        preprocessed_key = preprocessed_key.replace(removed_char, '')
        preprocessed_key = preprocessed_key.replace(' ', '')
        ordered_unique_key = ordered_set.OrderedSet(preprocessed_key)
        ordered_unique_key |= alphabet
        self._write_square_key(ordered_unique_key, 'playfair.json')
        return ordered_unique_key
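The key square construction relies on OrderedSet union keeping existing items in place and appending new ones in iteration order. A standalone sketch of that behavior (the key 'MONARCHY' and removed letter 'J' are illustrative):

import ordered_set

square = ordered_set.OrderedSet('MONARCHY')
square |= ordered_set.OrderedSet('ABCDEFGHIKLMNOPQRSTUVWXYZ')  # alphabet without the removed 'J'
print(''.join(square))  # MONARCHYBDEFGIKLPQSTUVWXZ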
Example no. 6
 def __new__(cls):
     # yamlizable does not call __init__, so attributes that are not defined above
     # need to be initialized here
     self = yamlize.Object.__new__(cls)
     self.assemblies = {}
     self._prepped = False
     self._assembliesBySpecifier = {}
     self.allNuclidesInProblem = ordered_set.OrderedSet()  # OrderedSet: better performance for lookups
     self.activeNuclides = ordered_set.OrderedSet()
     self.inertNuclides = ordered_set.OrderedSet()
     self.elementsToExpand = []
     return self
Example no. 7
    def check_license(self):
        """Checks if license_file' or 'license_files' is configured and adds any
        valid paths to 'self.filelist'.
        """

        files = ordered_set.OrderedSet()

        opts = self.distribution.get_option_dict('metadata')

        # ignore the source of the value
        _, license_file = opts.get('license_file', (None, None))

        if license_file is None:
            log.debug("'license_file' option was not specified")
        else:
            files.add(license_file)

        try:
            files.update(self.distribution.metadata.license_files)
        except TypeError:
            log.warn("warning: 'license_files' option is malformed")

        for f in list(files):  # iterate over a copy; removing from `files` mid-loop would skip entries
            if not os.path.exists(f):
                log.warn(
                    "warning: Failed to find the configured license file '%s'",
                    f)
                files.remove(f)

        self.filelist.extend(files)
Example no. 8
 def _add_var_type_info(self, fld, var_name: str, type_info: t.Any):
     # , scope: t.Any=None
     if var_name not in fld:
         fld[var_name] = ordered_set.OrderedSet()
     var_type_info = fld[var_name]
     if type_info is not None:
         var_type_info.add(type_info)
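The check-then-create pattern above can be collapsed with dict.setdefault; an equivalent sketch (the standalone function name is hypothetical):

import ordered_set

def add_var_type_info(fld, var_name, type_info):
    var_type_info = fld.setdefault(var_name, ordered_set.OrderedSet())
    if type_info is not None:
        var_type_info.add(type_info)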
Example no. 9
 def test_function_def(self):
     for ast_module in AST_MODULES:
         resolver = TypeHintResolver[ast_module,
                                     ast](globals_=GLOBALS_EXTERNAL)
         typer = StaticTyper[ast_module]()
         for description, example in FUNCTIONS_SOURCE_CODES.items():
             function_local_vars = FUNCTIONS_LOCAL_VARS[description]
             function_local_vars = {
                 k: ordered_set.OrderedSet(v)
                 for k, v in function_local_vars.items()
             }
             with self.subTest(ast_module=ast_module,
                               msg=description,
                               example=example):
                 tree = ast_module.parse(example)
                 function = tree.body[0]
                 function = resolver.visit(function)
                 function = typer.visit(function)
                 self.assertIsInstance(
                     function, StaticallyTypedFunctionDef[ast_module])
                 if ast_module is ast:
                     self.assertEqual(len(function._local_vars),
                                      len(function_local_vars))
                 else:
                     self.assertDictEqual(function_local_vars,
                                          function._local_vars)
                 _LOG.info('%s', function)
Example no. 10
 def _determine_percentage_column_base(self, columns) -> str:
     percentage_column = None
     for candidate in self._percentage_column_candidates:
         col = self._fundamental_column_prefix + candidate
         if col in columns:
             percentage_column = col
             break
     if percentage_column is None:
         unique_prefixes = list(
             ordered_set.OrderedSet(_.partition(':')[0] for _ in columns))
         _LOG.warning(
             '%s: percentage column candidates %s%s not found, trying %s:%s',
             self._db_path, self._fundamental_column_prefix,
             self._percentage_column_candidates, unique_prefixes,
             self._percentage_column_candidates)
         for prefix in unique_prefixes:
             for candidate in self._percentage_column_candidates:
                 col = '{}:{}'.format(prefix, candidate)
                 if col in columns:
                     percentage_column = col
                     break
             if percentage_column is not None:
                 break
     assert percentage_column is not None, columns
     return percentage_column
Example no. 11
 def _add_var(self, var_place: str, var_name: str, type_info: t.Any):
     # , scope: t.Any = None
     vars_ = getattr(self, var_place)
     if var_name not in vars_:
         vars_[var_name] = ordered_set.OrderedSet()
     var_type_info = vars_[var_name]
     if type_info is not None:
         var_type_info.add(type_info)
Example no. 12
 def __init__(self, *args, resolved_returns=None, **kwargs):
     self.resolved_returns = resolved_returns
     self._kind = FunctionKind.Undetermined
     self._params = {}
     self._returns = ordered_set.OrderedSet()
     self._local_vars = {}
     self._nonlocal_assignments = {}
     # self._scopes = []
     super().__init__(*args, **kwargs)
Example no. 13
def generate_candidates(g1, g2, mapping, node1, node2):
    """Generates the initial candidates for graph search."""
    # Check predecessors
    p1 = set(n for n in g1.predecessors(node1) if n not in mapping.keys())
    p2 = set(n for n in g2.predecessors(node2) if n not in mapping.values())
    candidates = ordered_set.OrderedSet(itertools.product(p1, p2))
    s1 = set(n for n in g1.successors(node1) if n not in mapping.keys())
    s2 = set(n for n in g2.successors(node2) if n not in mapping.values())
    candidates.update(list(itertools.product(s1, s2)))
    return candidates
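A usage sketch for the function above, assuming g1 and g2 are networkx-style directed graphs (the graphs and nodes are illustrative):

import networkx as nx

g1 = nx.DiGraph([(0, 1), (1, 2)])
g2 = nx.DiGraph([('a', 'b'), ('b', 'c')])
# with an empty mapping, candidates pair the unmapped predecessors and successors
print(list(generate_candidates(g1, g2, {}, 1, 'b')))  # [(0, 'a'), (2, 'c')]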
Example no. 14
    def __init__(self, *args, timeout=None, **kwargs) -> None:
        super().__init__(*args, **kwargs)

        # Store timeout
        self.timeout = timeout

        # Override server protocols set
        self.protocols = ordered_set.OrderedSet()

        # Most recent connection closed future
        self._most_recent_connection_closed_future = asyncio.Future(loop=self._loop)
Example no. 15
def buildVocabulary(messages):
    vocab = ordered_set.OrderedSet()
    vocabDict = {}
    for message in messages:
        # Loop through the list of elements created by splitting the subject and the snippet
        messageText = message['subject'].split(' ')
        messageText.extend(message['snippet'].split(' '))
        for word in messageText:
            if word in vocab:
                vocabDict[word] = vocabDict[word] + 1
            else:
                vocab.add(word)
                vocabDict[word] = 1
    return [vocab, vocabDict]
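The counts kept in vocabDict mirror what collections.Counter computes, while the OrderedSet preserves first-seen word order; an equivalent sketch:

import collections
import ordered_set

words = 'to be or not to be'.split()
vocab = ordered_set.OrderedSet(words)    # first-seen order, no duplicates
counts = collections.Counter(words)      # per-word frequencies
print(list(vocab))   # ['to', 'be', 'or', 'not']
print(counts['to'])  # 2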
Example no. 16
    def count_clusters(self, path):
        """

        :param path:
        :type path: list(core.segment.Segment)
        :return:
        :rtype: list(core.cluster.BaseCluster)
        """
        path_clusters = list()
        current_segment = path[0]
        for next_segment in path[1:]:

            current_clusters = self.segment_clusters(current_segment)
            next_clusters = self.segment_clusters(next_segment)

            if len(current_clusters) > 1 and len(next_clusters) > 1:
                # Both are on relay points, so the current_segment cluster must
                # be the common one between them.

                for cluster in current_clusters:
                    if cluster in next_clusters:
                        path_clusters.append(cluster)
                        break

            elif len(current_clusters) > 1:
                # The current_segment segment is on a relay point. In this
                # case, the next_segment segment is not on a relay point, so
                # we can just use that.

                path_clusters.append(next_clusters[0])

            elif len(next_clusters) > 1:
                # The next_segment segment is on a relay point. In this case,
                # the current_segment segment is only in one cluster, so we
                # just use that.

                path_clusters.append(current_clusters[0])

            else:
                # Neither segment is on a relay point; use the
                # current_segment cluster.
                path_clusters.append(current_clusters[0])

            # Move current_segment to the next_segment segment
            current_segment = next_segment

        # Remove any duplicates
        path_clusters = list(ordered_set.OrderedSet(path_clusters))
        return path_clusters
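The closing line uses an ordered set purely for order-preserving deduplication; a minimal demonstration:

import ordered_set

clusters = ['c1', 'c2', 'c1', 'c3', 'c2']
print(list(ordered_set.OrderedSet(clusters)))  # ['c1', 'c2', 'c3']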
Example no. 17
def query_registry() -> pd.DataFrame:
    """Gather information about supported languages in transpyle and scope of their support."""
    classes = (Parser, AstGeneralizer, Unparser, Compiler, Binder)
    distinct_languages = list(
        ordered_set.OrderedSet(Language.registered.values()))
    language_support = {}
    for language in distinct_languages:
        language_support[language] = [
            cls.find(language) is not None for cls in classes
        ]
    return pd.DataFrame(
        columns=classes,
        index=distinct_languages,
        data=[support for _, support in language_support.items()],
        dtype=bool)
Example no. 18
 def _add_params_type_info(self):
     args, vararg, kwonlyargs, kw_defaults, kwarg, defaults = \
         self.args.args, self.args.vararg, self.args.kwonlyargs, self.args.kw_defaults, \
         self.args.kwarg, self.args.defaults
     if vararg or kwonlyargs or kwarg:
         raise NotImplementedError('only simple function definitions are supported')
     if kw_defaults or defaults:
         _LOG.warning('ignoring default parameter values in %s: %s and %s',
                      self.name, kw_defaults, defaults)
     for i, arg in enumerate(args):
         if i == 0:
             if self._kind in (FunctionKind.Constructor, FunctionKind.InstanceMethod) \
                     and arg.arg == 'self':
                 continue
             if self._kind is FunctionKind.ClassMethod and arg.arg == 'cls':
                 continue
         type_info = ordered_set.OrderedSet()
         if getattr(arg, 'resolved_annotation', None) is not None:
             type_info.add(arg.resolved_annotation)
         if getattr(arg, 'resolved_type_comment', None) is not None:
             type_info.add(arg.resolved_type_comment)
         self._params[arg.arg] = type_info
Example no. 19
    def Web_request_to_API(self, total_tweets, twitter_username):
        """ Returns  a collection populated with tweets"""
        self.mongoDB_url = self.mongoDB_url[twitter_username]  # select the per-user collection by name
        ids = ordered_set.OrderedSet()
        # statuses.user_timeline
        # TODO: use num_tweets as the total number of tweets to scrape
        search = self.twitter_api.statuses.user_timeline(
            screen_name=twitter_username, count=200)
        # to prevent duplicate tweets
        maxID = search[-1]['id'] - 1

        while len(ids) <= total_tweets:

            self.collect.append(search)
            ids.update(tweet['id'] for tweet in search)  # record seen tweet ids
            try:
                search = self.twitter_api.statuses.user_timeline(
                    screen_name=twitter_username, count=200, max_id=maxID)
                maxID = search[-1]['id'] - 1  # page backwards past the oldest tweet fetched
            except KeyError:
                print('keyerror')
                break
            except twitter.TwitterHTTPError:
                print('sleeping for 16 minutes')
                time.sleep(16 * 60)
Example no. 20
        def Web_request_to_API(self, total_tweets, username):

            count = 199
            ids = ordered_set.OrderedSet()

            search = self.twitter_api.statuses.user_timeline(
                screen_name=username, count=count)
            maxID = search[-1]['id'] - 1

            # end the loop once the target number of tweets has been scraped
            while len(ids) < total_tweets:

                self.collect.append(search)
                ids.update(tweet['id'] for tweet in search)  # record seen tweet ids

                try:
                    search = self.twitter_api.statuses.user_timeline(
                        screen_name=username, count=count, max_id=maxID)
                    maxID = search[-1]['id'] - 1  # page backwards past the oldest tweet fetched
                except KeyError:
                    print('keyerror')
                    break
                except twitter.TwitterHTTPError:
                    print("twitter.TwitterHTTPError")
                    print('sleeping for ~ 16 minutes')
                    time.sleep(16 * 60)
Example no. 21
def build_ireq_set(specifiers,                    # type: Iterable[str]
                   index_urls=None,               # type: Optional[Iterable[str]]
                   prereleases=False,             # type: bool
                   resolve_canonical_names=True,  # type: bool
                   resolve_source_dir=None,       # type: Optional[str]
                   resolve_versions=True,         # type: bool
                   sort_specifiers=True,          # type: bool
                   ):
    # type: (...) -> InstallReqSet
    """Builds a set of install requirements.

    Args:
        specifiers: A list of specifier strings.
        index_urls: List of Python package indexes. Only used if
            **resolve_canonical_names** or **resolve_versions** is
            ``True``.
        prereleases: Whether or not to include prereleases.
        resolve_canonical_names: Queries package indexes provided by
            **index_urls** for the canonical name of each
            specifier. For example, *flask* will get resolved to
            *Flask*.
        resolve_source_dir: If an editable local directory is provided,
            rewrites the path to a path relative to the given
            absolute path.
        resolve_versions: Queries package indexes for latest package
            versions.
        sort_specifiers: Sorts specifiers alphabetically.

    Returns:
        A set of :class:`HashableInstallRequirement`.

    """
    install_requirements = ordered_set.OrderedSet()
    if sort_specifiers:
        specifiers = sorted(specifiers)
    for specifier in specifiers:
        if specifier.startswith('-e'):
            ireq = HashableInstallRequirement.from_line(specifier)
        else:
            args = []
            for index_url in index_urls or []:  # index_urls may be None
                args.extend(['--extra-index-url', index_url])
            ireq = resolve_specifier(specifier, prereleases, resolve_versions,
                                     *args)
        if resolve_canonical_names and not ireq.editable:
            package_name = piptools.utils.name_from_req(ireq)
            canonical_name = get_canonical_name(
                package_name=package_name, index_urls=index_urls)
            update_ireq_name(
                install_requirement=ireq, package_name=canonical_name)
        elif resolve_source_dir is not None and ireq.source_dir:
            try:
                ireq.source_dir = str(
                    pathlib.Path(ireq.source_dir)
                    .relative_to(pathlib.Path(resolve_source_dir)))
                ireq.link = pip.index.Link('file://{}'.format(
                    ireq.source_dir))
            except ValueError:
                pass
        install_requirements.add(ireq)
    return install_requirements
Example no. 22
 def __init__(self, monitored_property, *records, **kwargs):
     self._category_to_index = defaultdict(list)
     self._category_set = ordered_set.OrderedSet()
     super(CategoricalRecordCollection, self).__init__(*records, **kwargs)
     self._split(monitored_property)
Example no. 23
 def extra_index_urls(self):
     # type: () -> Set[str]
     """Extra Python package index URLs."""
     if len(self.index_urls) > 1:
         return self.index_urls[1:]
     return ordered_set.OrderedSet()
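Unlike the built-in set, OrderedSet supports indexing and slicing, which is what makes index_urls[1:] work; a quick demonstration (the URLs are placeholders):

import ordered_set

urls = ordered_set.OrderedSet(['https://pypi.org/simple', 'https://mirror.example/simple'])
print(urls[0])   # the primary index URL
print(urls[1:])  # an OrderedSet holding the extras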
Example no. 24
import ordered_set
import timeit

o = ordered_set.OrderedSet()

setup = """
import ordered_set
import collections
n = 1000
every = 100
objlist = [object() for _ in range(n)]
findobjlist = [objlist[i] for i in range(0,n,every)]

oset = ordered_set.OrderedSet(objlist)
uoset = set(objlist)
olist = collections.OrderedDict.fromkeys(objlist, None)

"""
Example no. 25
    def _resolveNuclides(self, cs):
        """
        Process elements and determine how to expand them to natural isotopics.

        Also builds meta-data about which nuclides are in the problem.

        This system works by building a dictionary in the
        ``elementsToExpand`` attribute with ``Element`` keys
        and list of ``NuclideBase`` values.

        The actual expansion of elementals to isotopics occurs during
        :py:meth:`Component construction <armi.reactor.blueprints.componentBlueprint.
        ComponentBlueprint._constructMaterial>`.
        """

        from armi import utils

        actives = set()
        inerts = set()
        undefBurnChainActiveNuclides = set()
        if self.nuclideFlags is None:
            self.nuclideFlags = isotopicOptions.genDefaultNucFlags()

        self.elementsToExpand = []
        for nucFlag in self.nuclideFlags:
            # this returns any nuclides that are flagged specifically for expansion by input
            expandedElements = nucFlag.fileAsActiveOrInert(
                actives, inerts, undefBurnChainActiveNuclides)
            self.elementsToExpand.extend(expandedElements)

        inerts -= actives
        self.customIsotopics = self.customIsotopics or isotopicOptions.CustomIsotopics()
        (
            elementalsToKeep,
            expansions,
        ) = isotopicOptions.autoSelectElementsToKeepFromSettings(cs)

        nucsFromInput = actives | inerts  # join

        # Flag all elementals for expansion unless they've been flagged otherwise by
        # user input or automatic lattice/datalib rules.
        for elemental in nuclideBases.instances:
            if not isinstance(elemental, nuclideBases.NaturalNuclideBase):
                # `elemental` may be a NaturalNuclideBase or a NuclideBase
                # skip all NuclideBases
                continue

            if elemental in elementalsToKeep:
                continue

            if elemental.name in actives:
                currentSet = actives
                actives.remove(elemental.name)
            elif elemental.name in inerts:
                currentSet = inerts
                inerts.remove(elemental.name)
            else:
                # This was not specified in the nuclide flags at all.
                # If a material with this in its composition is brought in
                # it's nice from a user perspective to allow it.
                # But current behavior is that all nuclides in problem
                # must be declared up front.
                continue

            self.elementsToExpand.append(elemental.element)

            if (elemental.name in self.nuclideFlags
                    and self.nuclideFlags[elemental.name].expandTo):
                # user-input has precedence
                newNuclides = [
                    nuclideBases.byName[nn] for nn in self.nuclideFlags[
                        elemental.element.symbol].expandTo
                ]
            elif (elemental in expansions
                  and elemental.element.symbol in self.nuclideFlags):
                # code-specific expansion required
                newNuclides = expansions[elemental]
                # overlay code details onto nuclideFlags for other parts of the code
                # that will use them.
                # CRAP: would be better if nuclideFlags did this upon reading s.t.
                # order didn't matter. On the other hand, this is the only place in
                # the code where NuclideFlags get built and have user settings around
                # (hence "resolve").
                # This must be updated because the operative expansion code just uses the flags
                #
                # Also, if this element is not in nuclideFlags at all, we just don't add it
                self.nuclideFlags[elemental.element.symbol].expandTo = [
                    nb.name for nb in newNuclides
                ]
            else:
                # expand to all possible natural isotopics
                newNuclides = elemental.element.getNaturalIsotopics()

            for nb in newNuclides:
                currentSet.add(nb.name)

        if self.elementsToExpand:
            runLog.info(
                "Will expand {} elementals to have natural isotopics".format(
                    ", ".join(element.symbol
                              for element in self.elementsToExpand)))

        self.activeNuclides = ordered_set.OrderedSet(sorted(actives))
        self.inertNuclides = ordered_set.OrderedSet(sorted(inerts))
        self.allNuclidesInProblem = ordered_set.OrderedSet(
            sorted(actives.union(inerts)))

        # Inform user which nuclides are truncating the burn chain.
        if undefBurnChainActiveNuclides:
            runLog.info(
                tabulate.tabulate(
                    [[
                        "Nuclides truncating the burn-chain:",
                        utils.createFormattedStrWithDelimiter(
                            list(undefBurnChainActiveNuclides)),
                    ]],
                    tablefmt="plain",
                ),
                single=True,
            )
Example no. 26
def mine():
    """
    This function serves as an interface to add the pending
    transactions to the blockchain by adding them to the block
    and figuring out Proof Of Work.
    """

    if len(blockchain.memory_pool) == 0:
        print(colored("No transactions to mine.", "red"))
        return

    print("Starting to mine block:", len(blockchain.chain))

    # Gather all tx ids.
    tx_ids = [tx.tx_id for tx in blockchain.memory_pool]

    # Create block header for our candidate block.
    header = BlockHeader(previous_block_hash=blockchain.last_block.hash,
                         merkle_root=merkle_root(tx_ids))

    # Try to guess the correct hash given a certain difficulty.
    computed_hash = header.compute_hash()
    while not computed_hash.startswith('0' * Blockchain.difficulty):
        header.nonce += 1
        computed_hash = header.compute_hash()

    print("Found correct nonce:", header.nonce, " Hash:", computed_hash[0:10])

    # Update UTXO and transaction position.
    for index, transaction in enumerate(blockchain.memory_pool):

        # NOTE: transaction validation is omitted here; a transaction that
        # fails verification should be skipped rather than committed.

        # Add tx position.
        blockchain.tx_position[transaction.tx_id] = TXPosition(
            len(blockchain.chain), index)
        # Add outputs as unspent.
        for utxo in transaction.tx_outputs:
            blockchain.utxo.add(utxo)
        # Remove utxo that now are spent.
        for input_tx in transaction.tx_inputs:
            blockchain.utxo.remove(input_tx)
            if input_tx in blockchain.utxo_pool:
                blockchain.utxo_pool.remove(input_tx)

    new_block = Block(index=len(blockchain.chain),
                      transactions=list(blockchain.memory_pool),
                      header=header)

    new_block.hash = new_block.compute_hash()

    blockchain.chain.append(new_block)
    blockchain.memory_pool = ordered_set.OrderedSet()

    print("New block added:", len(blockchain.chain))

    # TODO: Test for balance.
    total = 0

    for utxo in blockchain.utxo:
        total += utxo.amount

    print(
        colored("Total:" + str(total) + " - UTXO:" +
                str(len(blockchain.utxo))))
Example no. 27
 def dead_connections(self):
     return list(
         ordered_set.OrderedSet(self.connections) -
         ordered_set.OrderedSet(self.alive_connections))
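Difference between OrderedSets returns an OrderedSet that keeps the left operand's order, so the dead connections come back in their original order; a demonstration (the connection names are placeholders):

import ordered_set

connections = ordered_set.OrderedSet(['c1', 'c2', 'c3', 'c4'])
alive = ordered_set.OrderedSet(['c2', 'c4'])
print(list(connections - alive))  # ['c1', 'c3']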
Example no. 28
def parseTaxon(taxon, parser, phrases=False, treefilebase=None, cleantree=True, draw=False, outfile=None, ttrace=0):

    print('\rTAXON: ', taxon.flora, taxon.family, taxon.genus, taxon.species)
    if outfile:
        print('\rTAXON: ', taxon.flora, taxon.family, taxon.genus, taxon.species, file=outfile)
    flora = taxon.flora
    famname = taxon.family
    taxname = taxon.genus + ' ' + taxon.species
    taxonNo = taxon.taxonNO
    logging.info('TAXON:  ' + taxname)
    cfset = ordered_set.OrderedSet()
    for sent in taxon.sentences:
        mainsubject = 'testing'
        for iphrase, phrase in enumerate(sent.phrases):
            logging.info('PARSING: ' + phrase.text)
            # print('\rPARSING: ', phrase.text, file=cf)
            ptime = time.process_time()
            try:
                trees = parser.parse(phrase.tokens, cleantree=cleantree, maxtrees=100)
            except Exception:
                # e = sys.exc_info()
                print('Parser failure!')
                traceback.print_exc()
                continue
            if True:
                tokens = parser._chart._tokens
                # cfset.clear()
                subject = ''
                for t, txtstart, txtend in parser.listSUBJ():
                    cleanparsetree(t)
                    if ttrace: print('Text: ', sent.text[txtstart:txtend])
                    H = t[()].label()['H']
                    subject = H['orth']
                    if iphrase == 0:
                        mainsubject = subject
                    DumpChars(taxonNo, flora, famname, taxname, mainsubject, subject, '', H, tokens, sent.text,
                              phrase.slice.start + sent.slice.start, phrase.slice.stop + sent.slice.start, indent=1,
                              cset=cfset)
                    if phrases:
                        cfset[-1].value = phrase.text
                        continue
                if phrases: continue
                charlist = parser.listCHARs(getCHR=True if trees else False)
                for t, txtstart, txtend in charlist:
                    if cleantree: cleanparsetree(t)
                    if ttrace: print('Text: ', sent.text[txtstart:txtend])
                    if draw:
                        t.draw()
                    try:
                        H = t[()].label()['H']
                        if ttrace: print(H.get('category'), H.get('orth'))
                    except Exception:
                        print('failure to get H')
                        H = None
                    if H:
                        DumpChars(taxonNo, flora, famname, taxname, mainsubject, subject, '', H, tokens, sent.text,
                                  txtstart + sent.slice.start, txtend + sent.slice.start, indent=1, cset=cfset)

            dtime = time.process_time() - ptime
            if trees:
                if outfile: print('Success: \n ' + phrase.text, file=outfile)
                if outfile: print('No. of trees: %d' % len(trees), 'ptime: ' + str(dtime), file=outfile)
                if ttrace:
                    for i, treex in enumerate(trees):
                        cleanparsetree(treex)
                        if draw: treex.draw()
                        if treefilebase and i <= 20:
                            tfilename = treefilebase + str(i)
                            tfile = open(tfilename, mode='w', encoding='utf-8')
                            print(treex, file=tfile)
                            tfile.close()
                            # print(FindNode('SUBJECT', trees[0]))
            else:
                if outfile: print('Fail:\n ' + phrase.text, file=outfile)
                trees = parser.partialparses()
                if outfile: print('No. of trees: %d' % len(trees), 'ptime: ' + str(dtime), file=outfile)
                # if ttrace and draw:
                #     for treex in trees[0:40]:
                #         cleanparsetree(treex)
                #         treex.draw()
                if trees:
                    print(FindNode('SUBJECT', trees[0]))

    return cfset
Example no. 29
    def _resolveNuclides(self, cs):
        """Expands the density of any elemental nuclides to its natural isotopics."""

        from armi import utils

        # expand burn-chain to only contain nuclides, no elements
        actives = set()
        inerts = set()
        undefBurnChainActiveNuclides = set()
        if self.nuclideFlags is None:
            self.nuclideFlags = genDefaultNucFlags()
        for nucFlag in self.nuclideFlags:
            nucFlag.prepForCase(actives, inerts, undefBurnChainActiveNuclides)

        inerts -= actives
        self.customIsotopics = self.customIsotopics or CustomIsotopics()
        self.elementsToExpand = []

        elementalsToSkip = self._selectNuclidesToExpandForModeling(cs)

        # if elementalsToSkip=[CR], we expand everything else. e.g. CR -> CR (unchanged)
        nucsFromInput = actives | inerts  # join

        for elemental in nuclideBases.instances:
            if not isinstance(elemental, nuclideBases.NaturalNuclideBase):
                continue
            if elemental.name not in nucsFromInput:
                continue

            # we've now confirmed this elemental is in the problem
            if elemental in elementalsToSkip:
                continue

            nucsInProblem = actives if elemental.name in actives else inerts
            nucsInProblem.remove(elemental.name)

            self.elementsToExpand.append(elemental.element)

            for nb in elemental.element.getNaturalIsotopics():
                nucsInProblem.add(nb.name)

        if self.elementsToExpand:
            runLog.info(
                "Expanding {} elementals to have natural isotopics".format(
                    ", ".join(element.symbol
                              for element in self.elementsToExpand)))

        self.activeNuclides = ordered_set.OrderedSet(sorted(actives))
        self.inertNuclides = ordered_set.OrderedSet(sorted(inerts))
        self.allNuclidesInProblem = ordered_set.OrderedSet(
            sorted(actives.union(inerts)))

        # Inform user that the burn-chain may not be complete
        if undefBurnChainActiveNuclides:
            runLog.info(
                tabulate.tabulate(
                    [[
                        "Nuclides truncating the burn-chain:",
                        utils.createFormattedStrWithDelimiter(
                            list(undefBurnChainActiveNuclides)),
                    ]],
                    tablefmt="plain",
                ),
                single=True,
            )
Example no. 30
def extend_source_file(
        working_directory,  # type: str
        tag_name,  # type: str
        specifiers,  # type: Iterable[str]
        extension='.in',  # type: str
        index_url=None,  # type: Optional[str]
        extra_index_urls=None,  # type: Optional[Set[str]]
        lookup_index_urls=None,  # type: Optional[Set[str]]
        prereleases=False,  # type: bool
        resolve_canonical_names=True,  # type: bool
        resolve_versions=True,  # type: bool
):
    # type: (...) -> None
    """Adds requirements to an existing requirement source file.

    Args:
        working_directory: The parent directory of the source file.
        tag_name: The tag name.
        specifiers: A list of specifiers.
        extension: The file extension. Defaults to *.in*.
        index_url: A Python package index URL.
        extra_index_urls: Extra Python package index URLs.
        lookup_index_urls: Python package index URLs used to search
            for packages during resolving. This parameter is only useful
            if an attempt is made to add packages found only in indexes
            that are only specified in nested requirement source files.
        prereleases: Whether or not to include prereleases.
        resolve_canonical_names: Queries package indexes provided by
            **index_urls** for the canonical name of each
            specifier. For example, *flask* will get resolved to
            *Flask*.
        resolve_versions: Queries package indexes for latest package
            versions.

    """
    if extra_index_urls is None:
        extra_index_urls = ordered_set.OrderedSet()
    else:
        extra_index_urls = ordered_set.OrderedSet(extra_index_urls)

    filename = build_filename(working_directory=working_directory,
                              tag_name=tag_name,
                              extension=extension)
    req_file = reqwire.helpers.requirements.RequirementFile(str(filename))
    if filename.exists():
        if index_url is not None and index_url != req_file.index_url:
            raise reqwire.errors.IndexUrlMismatchError('"{}" != "{}"'.format(
                index_url, req_file.index_url))
        elif index_url is None:
            index_url = req_file.index_url
            extra_index_urls |= req_file.extra_index_urls

    if lookup_index_urls is None:
        lookup_index_urls = {index_url} if index_url is not None else set()
        if extra_index_urls is not None:
            lookup_index_urls |= set(extra_index_urls)

        if not lookup_index_urls and req_file.index_urls:
            lookup_index_urls |= req_file.index_urls

    req_file.requirements |= reqwire.helpers.requirements.build_ireq_set(
        specifiers=specifiers,
        index_urls=lookup_index_urls,
        prereleases=prereleases,
        resolve_canonical_names=resolve_canonical_names,
        resolve_source_dir=str(pathlib.Path(working_directory).parent),
        resolve_versions=resolve_versions)

    resolved_requirements = reqwire.helpers.requirements.resolve_ireqs(
        requirements=req_file.requirements,
        prereleases=prereleases,
        intersect=True)

    nested_cfiles = ordered_set.OrderedSet(
        str(cf.filename.relative_to(filename.parent))
        for cf in req_file.nested_cfiles)
    nested_rfiles = ordered_set.OrderedSet(
        str(rf.filename.relative_to(filename.parent))
        for rf in req_file.nested_rfiles)

    reqwire.helpers.requirements.write_requirements(
        filename=str(filename),
        requirements=resolved_requirements,
        header=build_source_header(index_url=index_url,
                                   extra_index_urls=extra_index_urls,
                                   nested_cfiles=nested_cfiles,
                                   nested_rfiles=nested_rfiles))