def get_cluster_vis(RF_matrix, BSD_matrix):
    """
    Create UPGMA and NJ trees based on RF and BSD distances respectively.

    Parameters
    ----------
    RF_matrix : str
        CSV text of the Robinson-Foulds distance matrix.
    BSD_matrix : str
        CSV text of the branch-score distance matrix.

    Returns
    -------
    tuple
        ``(upgma_rf_tree, upgma_bsd_tree, nj_rf_tree, nj_bsd_tree)`` —
        the visualizations produced by ``make_tree_vis``.
    """
    pdm_rf = dd.PhylogeneticDistanceMatrix.from_csv(src=StringIO(RF_matrix),
                                                    delimiter=",")
    pdm_bsd = dd.PhylogeneticDistanceMatrix.from_csv(src=StringIO(BSD_matrix),
                                                     delimiter=",")

    # UPGMA trees (newick strings, post-processed by clean_string)
    upgma_rf_str = clean_string(pdm_rf.upgma_tree().as_string("newick"))
    upgma_bsd_str = clean_string(pdm_bsd.upgma_tree().as_string("newick"))

    # Neighbor Joining trees
    nj_rf_str = clean_string(pdm_rf.nj_tree().as_string("newick"))
    nj_bsd_str = clean_string(pdm_bsd.nj_tree().as_string("newick"))

    # Create visualizations
    upgma_rf_tree = make_tree_vis(upgma_rf_str, "UPGMA RF", "UPGMA_RF")
    upgma_bsd_tree = make_tree_vis(upgma_bsd_str, "UPGMA BSD", "UPGMA_BSD")
    nj_rf_tree = make_tree_vis(nj_rf_str, "NJ RF", "NJ_RF")
    nj_bsd_tree = make_tree_vis(nj_bsd_str, "NJ BSD", "NJ_BSD")

    # Bug fix: the visualizations were previously built and then discarded
    # (the function implicitly returned None); return them to the caller.
    return upgma_rf_tree, upgma_bsd_tree, nj_rf_tree, nj_bsd_tree
Exemplo n.º 2
0
    def _write(self,
            stream,
            taxon_namespaces=None,
            tree_lists=None,
            char_matrices=None,
            global_annotations_target=None):
        """
        Write the given data collections to ``stream`` as a NeXML document.

        Taxon namespaces, character matrices and tree lists are serialized
        (in that order) into an in-memory buffer first; the buffer is then
        wrapped in the opening/closing NeXML elements and flushed to
        ``stream``.
        """

        # reset book-keeping
        # Per-write identifier maps; cleared so ids from a previous write
        # do not leak into this document.
        self._taxon_namespaces_to_write = []
        self._taxon_namespace_id_map = {}
        self._taxon_id_map = {}
        self._node_id_map = {}
        self._state_alphabet_id_map = {}
        self._state_id_map = {}

        # Destination:
        # Writing to buffer instead of directly to output
        # stream so that all namespaces referenced in metadata
        # can be written
        body = StringIO()

        # comments and metadata
        self._write_annotations_and_comments(global_annotations_target, body, 1)

        # Taxon namespace discovery
        # Maps candidate namespace -> whether it should actually be written.
        candidate_taxon_namespaces = collections.OrderedDict()
        if self.attached_taxon_namespace is not None:
            candidate_taxon_namespaces[self.attached_taxon_namespace] = True
        elif taxon_namespaces is not None:
            if self.suppress_unreferenced_taxon_namespaces:
                # preload to preserve order
                for tns in taxon_namespaces:
                    candidate_taxon_namespaces[tns] = False
            else:
                for tns in taxon_namespaces:
                    candidate_taxon_namespaces[tns] = True
        # Namespaces referenced by tree lists / char matrices are always
        # written (subject to the attached-namespace restriction above).
        for data_collection in (tree_lists, char_matrices):
            if data_collection is not None:
                for i in data_collection:
                    if self.attached_taxon_namespace is None or i.taxon_namespace is self.attached_taxon_namespace:
                        candidate_taxon_namespaces[i.taxon_namespace] = True
        self._taxon_namespaces_to_write = [tns for tns in candidate_taxon_namespaces if candidate_taxon_namespaces[tns]]

        for tns in self._taxon_namespaces_to_write:
            self._write_taxon_namespace(tns, body)

        if char_matrices:
            for char_matrix in char_matrices:
                self._write_char_matrix(char_matrix=char_matrix, dest=body)

        if tree_lists:
            for tree_list in tree_lists:
                self._write_tree_list(tree_list=tree_list, dest=body)

        # Emit the document: opening element, buffered body, closing element.
        self._write_to_nexml_open(stream, indent_level=0)
        stream.write(body.getvalue())
        self._write_to_nexml_close(stream, indent_level=0)
Exemplo n.º 3
0
 def write_out_validate_equal_and_return(self, writeable, schema, kwargs):
     """
     Serialize ``writeable`` in ``schema`` via three code paths — file
     object, ``as_string``, and file path — and assert that all three
     produce identical output.

     Returns the serialized string for further checks by the caller.
     """
     self.maxDiff = None
     sio = StringIO()
     writeable.write(file=sio, schema=schema, **kwargs)
     s0 = sio.getvalue()
     s1 = writeable.as_string(schema=schema, **kwargs)
     self.assertEqual(s0, s1)
     # Round-trip through an actual file on disk.
     with pathmap.SandboxedFile() as tempf:
         writeable.write(path=tempf.name, schema=schema, **kwargs)
         tempf.flush()
         tempf.close()
         with open(tempf.name, "r") as src:
             s2 = src.read()
     self.assertEqual(s0, s2)
     return s0
Exemplo n.º 4
0
 def write_out_validate_equal_and_return(self, writeable, schema, kwargs):
     """
     Serialize ``writeable`` three different ways (to a file object, via
     ``as_string``, and to a file path) and assert the outputs agree.

     Returns the serialized string.
     """
     self.maxDiff = None
     buffer = StringIO()
     writeable.write(file=buffer, schema=schema, **kwargs)
     via_file_obj = buffer.getvalue()
     via_string = writeable.as_string(schema=schema, **kwargs)
     self.assertEqual(via_file_obj, via_string)
     # Third path: write to a real file on disk and read it back.
     with pathmap.SandboxedFile() as tempf:
         writeable.write(path=tempf.name, schema=schema, **kwargs)
         tempf.flush()
         tempf.close()
         with open(tempf.name, "r") as handle:
             via_path = handle.read()
     self.assertEqual(via_file_obj, via_path)
     return via_file_obj
Exemplo n.º 5
0
 def _read(self,
           stream,
           taxon_namespace_factory=None,
           tree_list_factory=None,
           char_matrix_factory=None,
           state_alphabet_factory=None,
           global_annotations_target=None):
     """
     Read a source containing multiple concatenated PHYLIP data blocks.

     The source is split at each match of
     ``MultiPhylipReader.data_block_start_pattern`` and each block is
     parsed by the wrapped single-PHYLIP reader; the character matrices
     from all blocks are collected into a single product.
     """
     # Block boundaries are located by regex over the whole text, so the
     # entire source must be read into memory up front.
     data = stream.read()
     start_positions = []
     for match in MultiPhylipReader.data_block_start_pattern.finditer(data):
         start_positions.append(match.start(1))
     if not start_positions:
         raise error.DataParseError("No PHYLIP data blocks found in source",
                                    stream=stream)
     char_matrices = []
     for idx, start_pos in enumerate(start_positions):
         # Each block runs to the start of the next block, or to EOF for
         # the final block.
         if idx == len(start_positions) - 1:
             end_pos = len(data)
         else:
             end_pos = start_positions[idx + 1]
         block = data[start_pos:end_pos]
         src = StringIO(block)
         subproduct = self._phylip_reader._read(
             stream=src,
             taxon_namespace_factory=taxon_namespace_factory,
             tree_list_factory=tree_list_factory,
             char_matrix_factory=char_matrix_factory,
             state_alphabet_factory=state_alphabet_factory,
             global_annotations_target=global_annotations_target,
         )
         char_matrices.extend(subproduct.char_matrices)
     # Only character matrices are aggregated; taxon namespaces and tree
     # lists are not collected here.
     product = self.Product(taxon_namespaces=None,
                            tree_lists=None,
                            char_matrices=char_matrices)
     return product
Exemplo n.º 6
0
    def runTest(self):
        """
        Check the bit-index helpers against known split bitmasks of a
        six-taxon rooted tree.
        """
        # rooted tree: so clade bitmasks
        tree_list = dendropy.TreeList.get_from_stream(
            StringIO("""[&R]((t5:0.161175,t6:0.161175):0.392293,((t4:0.104381,(t2:0.075411,t1:0.075411):1):0.065840,t3:0.170221):0.383247);"""),
            "newick")
        for i in tree_list:
            _LOG.debug(i._get_indented_form())
            i.encode_bipartitions()
            _LOG.debug(i._get_indented_form(splits=True))
            i._debug_check_tree(splits=True, logger_obj=_LOG)
        # Root edge: all six taxa are below the root, so bits 0..5 are set.
        root1 = tree_list[0].seed_node
        root1e = root1.edge
        self.assertEqual(bitprocessing.indexes_of_set_bits(root1e.split_bitmask), list(range(6)))
        self.assertEqual(bitprocessing.indexes_of_set_bits(root1e.split_bitmask, one_based=True), list(range(1,7)))
        # fill_bitmask=21 (0b10101) restricts reporting to bits 0, 2, 4.
        self.assertEqual(bitprocessing.indexes_of_set_bits(root1e.split_bitmask, fill_bitmask=21, one_based=True), [1, 3, 5])
        self.assertEqual(bitprocessing.indexes_of_set_bits(root1e.split_bitmask, fill_bitmask=21), [0, 2, 4])
        self.assertEqual(bitprocessing.num_set_bits(root1e.split_bitmask), 6)

        # First child of the root: the (t5,t6) clade, i.e. bits 0 and 1.
        fc1 = root1.child_nodes()[0]
        fc1e = fc1.edge
        self.assertEqual(bitprocessing.indexes_of_set_bits(fc1e.split_bitmask), [0, 1])
        self.assertEqual(bitprocessing.indexes_of_set_bits(fc1e.split_bitmask, one_based=True), [1, 2])
        self.assertEqual(bitprocessing.indexes_of_set_bits(fc1e.split_bitmask, fill_bitmask=0x15, one_based=True), [1])
        self.assertEqual(bitprocessing.indexes_of_set_bits(fc1e.split_bitmask, fill_bitmask=0x15), [0])
        self.assertEqual(bitprocessing.num_set_bits(fc1e.split_bitmask), 2)
Exemplo n.º 7
0
 def test_comments(self):
     """
     Tokenizing a newick string with embedded comments yields the bare
     token stream, with each comment captured and retrievable when the
     token following it is produced.
     """
     input_str = "([the quick]apple[brown],([fox]banjo,([jumps]cucumber[over the],[really]dogwood)[lazy]eggplant)) rhubarb[dog];"
     expected_comments = {
         "apple": ["the quick", "brown"],
         "banjo": ["fox"],
         "cucumber": ["jumps", "over the"],
         "dogwood": ["really"],
         "eggplant": ["lazy"],
         "rhubarb": ["dog"],
     }
     expected_tokens = [
         "(", "apple", ",", "(", "banjo", ",", "(", "cucumber", ",",
         "dogwood", ")", "eggplant", ")", ")", "rhubarb", ";"
     ]
     tokenizer = nexusprocessing.NexusTokenizer(src=StringIO(input_str))
     observed_tokens = []
     for tok in tokenizer:
         if tok in expected_comments:
             # Each expected comment set should be consumed exactly once.
             self.assertEqual(expected_comments.pop(tok),
                              tokenizer.pull_captured_comments())
         observed_tokens.append(tok)
     # All expected comments were seen, and the token stream matches.
     self.assertEqual(expected_comments, {})
     self.assertEqual(observed_tokens, expected_tokens)
Exemplo n.º 8
0
    def create_nj_newick(self, outputfile):
        """
        Build a neighbor-joining tree from this object's distance matrix
        and write it in newick format to ``outputfile``.

        Returns ``self`` to allow call chaining.
        """
        # Distance matrix is provided as tab-separated text by the helper.
        matrix_src = StringIO(self.nj_distance_matrix_str())
        distance_matrix = dendropy.PhylogeneticDistanceMatrix.from_csv(
            src=matrix_src, delimiter="\t")
        tree = distance_matrix.nj_tree()

        with open(outputfile, 'w') as out_handle:
            out_handle.write(tree.as_string("newick"))

        return self
Exemplo n.º 9
0
 def testPrunedThenEncoding(self):
     """
     Pruning the first tree down to the leaf set of the second should
     leave a symmetric (Robinson-Foulds) difference of 2 between them.
     """
     inp = StringIO('''(a,b,c,(d,e));
     (b,d,(c,e));''')
     first, second = dendropy.TreeList.get_from_stream(inp, schema='newick')
     # prune tree 1 to have the same leaf set as tree 2.
     #   this removes the first taxon in the taxon list "A"
     retain_list = set([node.taxon for node in second.leaf_nodes()])
     exclude_list = [node for node in first.leaf_nodes() if node.taxon not in retain_list]
     for nd in exclude_list:
         first.prune_subtree(nd)
     # the trees are now (b,c,(d,e)) and (b,d,(c,e)) so the symmetric diff is 2
     self.assertEqual(2, treecompare.symmetric_difference(first, second))
Exemplo n.º 10
0
 def check_tokenization(self, input_str, expected_tokens):
     """Tokenize ``input_str`` and assert that the resulting token
     sequence equals ``expected_tokens``."""
     tokenizer = nexusprocessing.NexusTokenizer(src=StringIO(input_str))
     observed = [token for token in tokenizer]
     self.assertEqual(observed, expected_tokens)
Exemplo n.º 11
0
def description_text():
    """Capture the output of ``description`` and return it as a string."""
    from dendropy.utility.textprocessing import StringIO
    buffer = StringIO()
    description(buffer)
    return buffer.getvalue()
Exemplo n.º 12
0
def description_text():
    """
    Return the output of ``description`` as a string, captured via an
    in-memory buffer.
    """
    from dendropy.utility.textprocessing import StringIO
    s = StringIO()
    description(s)
    return s.getvalue()
Exemplo n.º 13
0
"""

# read the tree
# NOTE(review): ``phylogeny_str`` and ``assemblage_data_table_str`` are
# defined earlier in the file (outside this excerpt); presumably a newick
# string and CSV text respectively — confirm against the full file.
tree = dendropy.Tree.get(
    data=phylogeny_str,
    schema="newick",
)

# obtain the PhylogeneticDistanceMatrix corresponding to the taxon-to-taxon
# distances of the above tree
pdm = tree.phylogenetic_distance_matrix()

# read the assemblage data into a table,
# being sure to specify an appropriate data type!
assemblage_data = container.DataTable.from_csv(
    src=StringIO(assemblage_data_table_str), default_data_type=int)

# generate the assemblage definitions
# Each table row is one assemblage; a taxon belongs to it when its count
# in that row is positive.
assemblage_names = []
assemblage_memberships = []
for row_name in assemblage_data.row_name_iter():
    assemblage_names.append(row_name)
    member_labels = set([
        col_name for col_name in assemblage_data.column_name_iter()
        if assemblage_data[row_name, col_name] > 0
    ])
    # Map member labels back to Taxon objects in the tree's namespace.
    member_taxa = set(
        [t for t in pdm.taxon_namespace if t.label in member_labels])
    assemblage_memberships.append(member_taxa)

# calculate the SES statistics for each assemblage
Exemplo n.º 14
0
    def _write(self,
               stream,
               taxon_namespaces=None,
               tree_lists=None,
               char_matrices=None,
               global_annotations_target=None):
        """
        Write the given data collections to ``stream`` as a NeXML document.

        Taxon namespaces, character matrices and tree lists are serialized
        (in that order) into an in-memory buffer first; the buffer is then
        wrapped in the opening/closing NeXML elements and flushed to
        ``stream``.
        """

        # reset book-keeping
        # Per-write identifier maps; cleared so ids from a previous write
        # do not leak into this document.
        self._taxon_namespaces_to_write = []
        self._taxon_namespace_id_map = {}
        self._taxon_id_map = {}
        self._node_id_map = {}
        self._state_alphabet_id_map = {}
        self._state_id_map = {}

        # Destination:
        # Writing to buffer instead of directly to output
        # stream so that all namespaces referenced in metadata
        # can be written
        body = StringIO()

        # comments and metadata
        self._write_annotations_and_comments(global_annotations_target, body,
                                             1)

        # Taxon namespace discovery
        # Maps candidate namespace -> whether it should actually be written.
        candidate_taxon_namespaces = collections.OrderedDict()
        if self.attached_taxon_namespace is not None:
            candidate_taxon_namespaces[self.attached_taxon_namespace] = True
        elif taxon_namespaces is not None:
            if self.suppress_unreferenced_taxon_namespaces:
                # preload to preserve order
                for tns in taxon_namespaces:
                    candidate_taxon_namespaces[tns] = False
            else:
                for tns in taxon_namespaces:
                    candidate_taxon_namespaces[tns] = True
        # Namespaces referenced by tree lists / char matrices are always
        # written (subject to the attached-namespace restriction above).
        for data_collection in (tree_lists, char_matrices):
            if data_collection is not None:
                for i in data_collection:
                    if self.attached_taxon_namespace is None or i.taxon_namespace is self.attached_taxon_namespace:
                        candidate_taxon_namespaces[i.taxon_namespace] = True
        self._taxon_namespaces_to_write = [
            tns for tns in candidate_taxon_namespaces
            if candidate_taxon_namespaces[tns]
        ]

        for tns in self._taxon_namespaces_to_write:
            self._write_taxon_namespace(tns, body)

        if char_matrices:
            for char_matrix in char_matrices:
                self._write_char_matrix(char_matrix=char_matrix, dest=body)

        if tree_lists:
            for tree_list in tree_lists:
                self._write_tree_list(tree_list=tree_list, dest=body)

        # Emit the document: opening element, buffered body, closing element.
        self._write_to_nexml_open(stream, indent_level=0)
        stream.write(body.getvalue())
        self._write_to_nexml_close(stream, indent_level=0)
Exemplo n.º 15
0
# read the tree
# NOTE(review): ``phylogeny_str`` and ``assemblage_data_table_str`` are
# defined earlier in the file (outside this excerpt).
tree = dendropy.Tree.get(
        data=phylogeny_str,
        schema="newick",
        )

# obtain the PhylogeneticDistanceMatrix corresponding to the taxon-to-taxon
# distances of the above tree
pdm = tree.phylogenetic_distance_matrix()


# read the assemblage data into a table,
# being sure to specify an appropriate data type!
assemblage_data = container.DataTable.from_csv(
        src=StringIO(assemblage_data_table_str),
        default_data_type=int)

# generate the assemblage definitions
# Each table row is one assemblage; a taxon belongs to it when its count
# in that row is positive.
assemblage_membership_definitions = collections.OrderedDict()
for row_name in assemblage_data.row_name_iter():
    member_labels = set([col_name for col_name in assemblage_data.column_name_iter() if assemblage_data[row_name, col_name] > 0])
    member_taxa = set([t for t in pdm.taxon_namespace if t.label in member_labels])
    assemblage_membership_definitions[row_name] = member_taxa

# calculate the SES statistics for each assemblage
results_mpd = pdm.standardized_effect_size_mean_pairwise_distance(
        assemblage_memberships=assemblage_membership_definitions.values())

# inspect the results
print("Phylogenetic Community Standardized Effect Size Statistics:")
Exemplo n.º 16
0
    def __init__(self, **kwargs):
        """Keyword Arguments
        -----------------
        rooting : string, {['default-unrooted'], 'default-rooted', 'force-unrooted', 'force-rooted'}
            Specifies how trees in the data source should be interpreted with
            respect to their rooting:

                'default-unrooted' [default]:
                    All trees are interpreted as unrooted unless a '[&R]'
                    comment token explicitly specifies them as rooted.
                'default-rooted'
                    All trees are interpreted as rooted unless a '[&U]'
                    comment token explicitly specifies them as unrooted.
                'force-unrooted'
                    All trees are unconditionally interpreted as unrooted.
                'force-rooted'
                    All trees are unconditionally interpreted as rooted.

        edge_length_type : type, default: ``float``
            Specifies the type of the edge lengths (``int`` or ``float``). Tokens
            interpreted as branch lengths will be cast to this type.
            Defaults to ``float``.
        suppress_edge_lengths : boolean, default: |False|
            If |True|, edge length values will not be processed. If |False|,
            edge length values will be processed.
        extract_comment_metadata : boolean, default: |True|
            If |True| (default), any comments that begin with '&' or '&&' will
            be parsed and stored as part of the annotation set of the
            corresponding object (accessible through the ``annotations``
            attribute of the object). This requires that the comment
            contents conform to a particular format (NHX or BEAST: 'field =
            value'). If |False|, then the comments will not be parsed,
            but will be instead stored directly as elements of the ``comments``
            list attribute of the associated object.
        store_tree_weights : boolean, default: |False|
            If |True|, process the tree weight (e.g. "[&W 1/2]") comment
            associated with each tree, if any. Defaults to |False|.
        finish_node_fn : function object, default: |None|
            If specified, this function will be applied to each node after
            it has been constructed.
        case_sensitive_taxon_labels : boolean, default: |False|
            If |True|, then taxon labels are case sensitive (e.g., "P.regius"
            and "P.REGIUS" will be treated as different operational taxonomic
            unit concepts). Otherwise, taxon label interpretation will be made
            without regard for case.
        preserve_underscores : boolean, default: |False|
            If |True|, unquoted underscores in labels will *not* be converted
            to spaces. Defaults to |False|: all underscores not protected by
            quotes will be converted to spaces.
        suppress_internal_node_taxa : boolean, default: |True|
            If |False|, internal node labels will be instantiated into
            |Taxon| objects. If |True|, internal node labels
            will *not* be instantiated as strings.
        suppress_leaf_node_taxa : boolean, default: |False|
            If |False|, leaf (external) node labels will be instantiated
            into |Taxon| objects. If |True|, leaf (external) node
            labels will *not* be instantiated as strings.
        is_parse_jplace_tokens : boolean: |False|
            If |True|, then accept edge numbering according to the jplace
            format, as described in Matsen et. al. PLoS One, 2012
            http://dx.doi.org/10.1371/journal.pone.0031009. An instance variable
            edge_index is added to the returned tree, and an edge_number is
            added to each edge. If False [default], encountering edge labels
            raises a NewickReaderMalformedStatementError.
        is_assign_internal_labels_to_edges : boolean, default: |None|
            If |True|, internal node labels will be assigned as edge labels.
        terminating_semicolon_required : boolean, default: |True|
            If |True| [default], then a tree statement that does not end in a
            semi-colon is an error. If |False|, then no error will be raised.
        ignore_unrecognized_keyword_arguments : boolean, default: |False|
            If |True|, then unsupported or unrecognized keyword arguments will
            not result in an error. Default is |False|: unsupported keyword
            arguments will result in an error.

        """

        # base class initialization
        ioservice.DataReader.__init__(self)

        self._rooting = None
        ## (TEMPORARY and UGLY!!!!) Special handling for legacy signature
        if "as_unrooted" in kwargs or "as_rooted" in kwargs or "default_as_rooted" in kwargs or "default_as_unrooted" in kwargs:
            import collections
            legacy_kw = ("as_unrooted", "as_rooted", "default_as_rooted", "default_as_unrooted")
            legacy_kw_str = ", ".join("'{}'".format(k) for k in legacy_kw)
            if "rooting" in kwargs:
                raise ValueError("Cannot specify 'rooting' keyword argument in conjunction with any of the (legacy) keyword arguments ({}). Use 'rooting' alone.".format(legacy_kw_str))
            specs = collections.Counter(k for k in kwargs.keys() if k in legacy_kw)
            if sum(specs.values()) > 1:
                raise ValueError("Cannot specify more than one of {{ {} }} at the same time".format(legacy_kw_str))
            kw = list(specs.keys())[0]
            # Translate the legacy flag (and its truthiness) into the
            # equivalent 'rooting' directive.
            if kw == "as_unrooted":
                corrected = "force-unrooted" if kwargs[kw] else "force-rooted"
            elif kw == "as_rooted":
                corrected = "force-rooted" if kwargs[kw] else "force-unrooted"
            elif kw == "default_as_unrooted":
                corrected = "default-unrooted" if kwargs[kw] else "default-rooted"
            elif kw == "default_as_rooted":
                corrected = "default-rooted" if kwargs[kw] else "default-unrooted"
            deprecate.dendropy_deprecation_warning(
                    preamble="Deprecated since DendroPy 4:",
                    old_construct="{}={}".format(kw, kwargs[kw]),
                    new_construct="rooting='{}'".format(corrected),
                    stacklevel=7)
            kwargs.pop(kw)
            kwargs["rooting"] = corrected
        if "allow_duplicate_taxon_labels" in kwargs:
            raise ValueError(
                "'allow_duplicate_taxon_labels' is no longer"
                " supported: trees with duplicate node labels can only be"
                " processed if the labels are not parsed as operational taxonomic"
                " unit concepts but instead as simply node labels by specifying"
                " 'suppress_internal_node_taxa=True, suppress_leaf_node_taxa=True'."
            )
        self.rooting = kwargs.pop("rooting", self.__class__._default_rooting_directive)
        self.edge_length_type = kwargs.pop("edge_length_type", float)
        self.suppress_edge_lengths = kwargs.pop("suppress_edge_lengths", False)
        self.extract_comment_metadata = kwargs.pop('extract_comment_metadata', True)
        self.store_tree_weights = kwargs.pop("store_tree_weights", False)
        self.default_tree_weight = kwargs.pop("default_tree_weight", self.__class__._default_tree_weight)
        self.finish_node_fn = kwargs.pop("finish_node_fn", None)
        self.case_sensitive_taxon_labels = kwargs.pop('case_sensitive_taxon_labels', False)
        self.preserve_unquoted_underscores = kwargs.pop('preserve_underscores', False)
        self.suppress_internal_node_taxa = kwargs.pop("suppress_internal_node_taxa", True)
        # "suppress_external_node_taxa" is the legacy spelling; the newer
        # "suppress_leaf_node_taxa" takes precedence when both are given.
        self.suppress_leaf_node_taxa = kwargs.pop("suppress_external_node_taxa", False) # legacy (will be deprecated)
        self.suppress_leaf_node_taxa = kwargs.pop("suppress_leaf_node_taxa", self.suppress_leaf_node_taxa)
        self.is_parse_jplace_tokens = kwargs.pop("is_parse_jplace_tokens", False)
        self.is_assign_internal_labels_to_edges = kwargs.pop("is_assign_internal_labels_to_edges", None)
        if self.is_assign_internal_labels_to_edges and not self.suppress_internal_node_taxa:
            raise ValueError("Conflicting options: cannot simultaneously assign internal labels to edges and to internal taxa")
        self.terminating_semicolon_required = kwargs.pop("terminating_semicolon_required", True)
        self.check_for_unused_keyword_arguments(kwargs)

        # per-tree book-keeping
        self._tree_statement_complete = None
        self._parenthesis_nesting_level = None
        self._seen_taxa = None
Exemplo n.º 17
0
C5,0,0,0,0,0,0,0,35,14,10,0,0,0,0,0
"""

# read the tree
# NOTE(review): ``phylogeny_str`` and ``assemblage_data_table_str`` are
# defined earlier in the file (outside this excerpt).
tree = dendropy.Tree.get(
    data=phylogeny_str,
    schema="newick",
)

# obtain the PhylogeneticDistanceMatrix corresponding to the taxon-to-taxon
# distances of the above tree
pdm = tree.phylogenetic_distance_matrix()

## read the assemblage memberships
assemblage_membership_definitions = pdm.assemblage_membership_definitions_from_csv(
    src=StringIO(assemblage_data_table_str), delimiter=",")

## calculate the results
results = pdm.standardized_effect_size_mean_pairwise_distance(
    assemblage_memberships=assemblage_membership_definitions.values())
# One result per assemblage, in the same order as the definitions.
assert len(results) == len(assemblage_membership_definitions)
for assemblage_name, result in zip(assemblage_membership_definitions, results):
    print("# Assemblage '{}' ({})".format(
        assemblage_name,
        sorted([
            t.label for t in assemblage_membership_definitions[assemblage_name]
        ])))
    # result.obs = observed MPD, result.z = standardized effect size,
    # result.p = p-value (as labeled by the output below).
    print("   -     MPD: {}".format(result.obs))
    print("   - SES MPD: {}".format(result.z))
    print("   - p-value: {}".format(result.p))
Exemplo n.º 18
0
 def parse_string(self, source):
     """
     Load an XML document from the XML string ``source``.

     The string is wrapped in an in-memory file-like buffer and handed to
     ``parse_file``.
     """
     s = StringIO(source)
     # Bug fix: previously the raw string ``source`` was passed to
     # parse_file while the StringIO wrapper ``s`` was created but unused.
     return self.parse_file(s)