Example #1
    def __init__(self, name, spec, arg=None):
        super().__init__(name, spec, arg)
        self.branches = {}

        def _sanitize(t):
            return galaxy_ui_var(value=galaxy_esc(str(t).replace('%', 'X')))

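        # Expand the parameter's QIIME type into selectable branches: union
        # predicates, Choices predicates, and Bool each become one branch per
        # concrete alternative; any other type maps to a single sanitized key.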
        for type_ in spec.qiime_type:
            if type_.predicate is not None and is_union(type_.predicate):
                for pred in type_.predicate.unpack_union():
                    new_type = type_.duplicate(predicate=pred)
                    self.branches[_sanitize(new_type)] = new_type
            elif (type_.predicate is not None
                  and type_.predicate.name == 'Choices'):
                for choice in type_.predicate.template.choices:
                    new_type = type_.duplicate(predicate=Choices(choice))
                    self.branches[galaxy_esc(choice)] = new_type
            elif type_.name == 'Bool':
                for choice in [True, False]:
                    new_type = type_.duplicate(predicate=Choices(choice))
                    self.branches[galaxy_esc(choice)] = new_type
            else:
                self.branches[_sanitize(type_)] = type_

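        # A parameter that defaults to None also gets an explicit "None" branch.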
        if self.spec.default is None:
            self.branches[galaxy_esc(None)] = {None}
Example #2
def bool_params():
    yield ParamTemplate('boolean', Bool, bool, (True, False))
    yield ParamTemplate('boolean_true', Bool % Choices(True), bool, (True, ))
    yield ParamTemplate('boolean_false', Bool % Choices(False), bool,
                        (False, ))
    yield ParamTemplate('boolean_choice', Bool % Choices(True, False), bool,
                        (True, False))
Example #3
def primitive_unions():
    yield ParamTemplate('disjoint', Int % (Range(5, 10) | Range(15, 20)), int,
                        (5, 9, 15, 19))
    yield ParamTemplate('auto_int',
                        Int % Range(1, None) | Str % Choices('auto'), object,
                        (1, 10, 'auto'))
    yield ParamTemplate(
        'kitchen_sink', Float % Range(0, 1) | Int
        | Str % Choices('auto', 'Beef') | Bool | Float % Range(10, 11), object,
        (0.5, 1000, 'Beef', 'auto', True, False, 10.103))
Example #4
    def test_primitive_predicate(self):
        self.assert_roundtrip(Int % Range(0, 10))
        self.assert_roundtrip(
            Int % (Range(0, 10) | Range(50, 100, inclusive_end=True)))
        self.assert_roundtrip(Float % Range(None, 10))
        self.assert_roundtrip(Float % Range(0, None))
        self.assert_roundtrip(Str % Choices("A"))
        self.assert_roundtrip(Str % Choices(["A"]))
        self.assert_roundtrip(Str % Choices("A", "B"))
        self.assert_roundtrip(Str % Choices(["A", "B"]))
        self.assert_roundtrip(Bool % Choices(True))
        self.assert_roundtrip(Bool % Choices(False))
Example #5
    def test_typevars(self):
        T, U, V, W, X = TypeMap({
            (Foo, Bar, Str % Choices('A', 'B')): (C1[Foo], C1[Bar]),
            (Foo | Bar, Foo, Str): (C1[Bar], C1[Foo])
        })

        scope = {}
        T1 = ast_to_type(T.to_ast(), scope=scope)
        U1 = ast_to_type(U.to_ast(), scope=scope)
        V1 = ast_to_type(V.to_ast(), scope=scope)
        W1 = ast_to_type(W.to_ast(), scope=scope)
        X1 = ast_to_type(X.to_ast(), scope=scope)

        self.assertEqual(len(scope), 1)
        self.assertEqual(scope[id(T.mapping)], [T1, U1, V1, W1, X1])

        self.assertEqual(T1.mapping.lifted, T.mapping.lifted)

        self.assertIs(T1.mapping, U1.mapping)
        self.assertIs(U1.mapping, V1.mapping)
        self.assertIs(V1.mapping, W1.mapping)
        self.assertIs(W1.mapping, X1.mapping)
Example #6
    package='q2_diversity',
    description=('This QIIME 2 plugin supports metrics for calculating '
                 'and exploring community alpha and beta diversity through '
                 'statistics and visualizations in the context of sample '
                 'metadata.'),
    short_description='Plugin for exploring community diversity.',
)

plugin.methods.register_function(
    function=q2_diversity.beta_phylogenetic,
    inputs={
        'table': FeatureTable[Frequency],
        'phylogeny': Phylogeny[Rooted]
    },
    parameters={
        'metric': Str % Choices(beta.phylogenetic_metrics()),
        'n_jobs': Int % Range(1, None)
    },
    outputs=[('distance_matrix', DistanceMatrix % Properties('phylogenetic'))],
    input_descriptions={
        'table': ('The feature table containing the samples over which beta '
                  'diversity should be computed.'),
        'phylogeny': ('Phylogenetic tree containing tip identifiers that '
                      'correspond to the feature identifiers in the table. '
                      'This tree can contain tip ids that are not present in '
                      'the table, but all feature ids in the table must be '
                      'present in this tree.')
    },
    parameter_descriptions={
        'metric': 'The beta diversity metric to be computed.',
        'n_jobs':
Example #7
inputs = {'table': FeatureTable[Frequency]}

input_descriptions = {
    'table': 'Feature table containing all features that '
    'should be used for target prediction.',
    'probabilities': 'Predicted class probabilities for '
    'each input sample.'
}

parameters = {
    'base': {
        'random_state': Int,
        'n_jobs': Int,
        'n_estimators': Int % Range(1, None),
        'missing_samples': Str % Choices(['error', 'ignore'])
    },
    'splitter': {
        'test_size':
        Float % Range(0.0, 1.0, inclusive_end=False, inclusive_start=False)
    },
    'rfe': {
        'step':
        Float % Range(0.0, 1.0, inclusive_end=False, inclusive_start=False),
        'optimize_feature_selection': Bool
    },
    'cv': {
        'cv': Int % Range(1, None),
        'parameter_tuning': Bool
    },
    'modified_metadata': {
Example #8
File: _plot.py  Project: ebolyen/gneiss
        index_f.write('<html><body>\n')
        index_f.write('<h1>Dendrogram heatmap</h1>\n')
        index_f.write('<img src="heatmap.svg" alt="heatmap">')
        index_f.write('</body></html>\n')


plugin.visualizers.register_function(
    function=dendrogram_heatmap,
    inputs={
        'table': FeatureTable[Composition],
        'tree': Phylogeny[Rooted]
    },
    parameters={
        'metadata': MetadataCategory,
        'ndim': Int,
        'method': Str % Choices(_transform_methods),
        'color_map': Str % Choices(_mpl_colormaps)
    },
    input_descriptions={
        'table': ('The feature table that will be plotted as a heatmap. '
                  'This table is assumed to have strictly positive values.'),
        'tree': ('A hierarchy of feature identifiers where each tip '
                 'corresponds to the feature identifiers in the table. '
                 'This tree can contain tip ids that are not present in '
                 'the table, but all feature ids in the table must be '
                 'present in this tree.')
    },
    parameter_descriptions={
        'metadata': ('Metadata to group the samples. '),
        'ndim':
        'Number of dimensions to highlight.',
Example #9
from q2_types.tree import Phylogeny, Rooted
from q2_types.ordination import PCoAResults


plugin = Plugin(
    name='diversity',
    version=q2_diversity.__version__,
    website='https://github.com/qiime2/q2-diversity',
    package='q2_diversity'
)

plugin.methods.register_function(
    function=q2_diversity.beta_phylogenetic,
    inputs={'table': FeatureTable[Frequency] % Properties('uniform-sampling'),
            'phylogeny': Phylogeny[Rooted]},
    parameters={'metric': Str % Choices(beta.phylogenetic_metrics())},
    outputs=[('distance_matrix', DistanceMatrix % Properties('phylogenetic'))],
    input_descriptions={
        'table': ('The feature table containing the samples over which beta '
                  'diversity should be computed.'),
        'phylogeny': ('Phylogenetic tree containing tip identifiers that '
                      'correspond to the feature identifiers in the table. '
                      'This tree can contain tip ids that are not present in '
                      'the table, but all feature ids in the table must be '
                      'present in this tree.')
    },
    parameter_descriptions={
        'metric': 'The beta diversity metric to be computed.'
    },
    output_descriptions={'distance_matrix': 'The resulting distance matrix.'},
    name='Beta diversity (phylogenetic)',
Example #10
        'details. (Use 0 to automatically use all available '
        'cores)'
    },
    output_descriptions={'tree': 'The resulting phylogenetic tree.'},
    name='Construct a phylogenetic tree with FastTree.',
    description=("Construct a phylogenetic tree with FastTree."),
    citations=[citations['price2010fasttree']])

plugin.methods.register_function(
    function=q2_phylogeny.raxml,
    inputs={'alignment': FeatureData[AlignedSequence]},
    parameters={
        'seed': Int,
        'n_searches': Int % Range(1, None),
        'n_threads': Int % Range(1, None),
        'substitution_model': Str % Choices(_RAXML_MODEL_OPT),
        'raxml_version': Str % Choices(_RAXML_VERSION_OPT)
    },
    outputs=[('tree', Phylogeny[Unrooted])],
    input_descriptions={
        'alignment': ('Aligned sequences to be used for phylogenetic '
                      'reconstruction.'),
    },
    parameter_descriptions={
        'n_searches': ('The number of independent maximum likelihood '
                       'searches to perform. The single best scoring '
                       'tree is returned.'),
        'n_threads': ('The number of threads to use for multithreaded '
                      'processing. Using more than one thread '
                      'will enable the PTHREADS version of RAxML.'),
        'raxml_version': ('Select a specific CPU optimization of RAxML to '
Example #11
plugin.register_formats(GraphModelingLanguageFormat)
plugin.register_formats(GraphModelingLanguageDirectoryFormat)
plugin.register_formats(PairwiseFeatureDataFormat)
plugin.register_formats(PairwiseFeatureDataDirectoryFormat)

plugin.register_semantic_type_to_format(
    Network, artifact_format=GraphModelingLanguageDirectoryFormat)
plugin.register_semantic_type_to_format(
    PairwiseFeatureData, artifact_format=PairwiseFeatureDataDirectoryFormat)

plugin.methods.register_function(
    function=calculate_correlations,
    inputs={'table': FeatureTable[Frequency]
            },  # TODO: Generalize, don't require frequency
    parameters={
        'corr_method': Str % Choices(["kendall", "pearson", "spearman"]),
        'p_adjustment_method': Str
    },
    outputs=[('correlation_table', PairwiseFeatureData)],
    input_descriptions={
        'table':
        ('Normalized and filtered feature table to use for microbial interdependence test.'
         )
    },
    parameter_descriptions={
        'corr_method':
        'The correlation test to be applied.',
        'p_adjustment_method':
        'The method for p-value adjustment to be applied. '
        'This can be selected from the list of methods in '
        'statsmodels multipletests'
Example #12
    name='Spatial Ornstein Uhlenbeck microbial community simulation',
    description=('This method simulates microbial behavior over time using '
                 'Ornstein Uhlenbeck models. These are similar to Brownian '
                 'Motion with the exception that they include reversion to '
                 'a mean.')
)

# Modify to allow for PCoAResults as input
plugin.visualizers.register_function(
    function=fit_timeseries,
    inputs={
        'pcoa': PCoAResults
    },
    parameters={
        # 'pcoa': Str,
        'method': Str % Choices({'basinhopping'}),
        'metadata': Str,
        'individual_col': Str,
        'timepoint_col': Str,
        'treatment_col': Str
    },
    input_descriptions={'pcoa': 'filepath to PCoA results'},
    parameter_descriptions={
        'method': 'global optimization method',
        'metadata': 'filepath to Sample metadata',
        'individual_col': 'individual column identifier',
        'timepoint_col': 'timepoint column identifier',
        'treatment_col': 'treatment column identifier'
    },
    name='Fit OU Models to PCoA Ordination output',
Example #13
    def test_collection_primitive(self):
        self.assert_roundtrip(Set[Str % Choices('A', 'B', 'C')])
        self.assert_roundtrip(List[Int % Range(1, 3, inclusive_end=True)
                                   | Str % Choices('A', 'B', 'C')])
Example #14
        'Plugin for quality control of feature and sequence data.')
)


seq_inputs = {'query_sequences': FeatureData[Sequence],
              'reference_sequences': FeatureData[Sequence]}

seq_inputs_descriptions = {
    'query_sequences': 'Sequences to test for exclusion',
    'reference_sequences': ('Reference sequences to align against feature '
                            'sequences')}

taxa_inputs = {'depth': Int,
               'palette': Str % Choices([
                    'Set1', 'Set2', 'Set3', 'Pastel1', 'Pastel2', 'Paired',
                    'Accent', 'Dark2', 'tab10', 'tab20', 'tab20b', 'tab20c',
                    'viridis', 'plasma', 'inferno', 'magma', 'terrain',
                    'rainbow'])}

taxa_inputs_descriptions = {
    'depth': 'Maximum depth of semicolon-delimited taxonomic ranks to '
             'test (e.g., 1 = root, 7 = species for the greengenes '
             'reference sequence database).',
    'palette': 'Color palette to utilize for plotting.'}


plugin.methods.register_function(
    function=exclude_seqs,
    inputs=seq_inputs,
    parameters={'method': Str % Choices(['blast', 'vsearch', 'blastn-short']),
                'perc_identity': Float % Range(0.0, 1.0, inclusive_end=True),
Example #15
    description=('This QIIME 2 plugin supports metrics for calculating '
                 'and exploring community alpha and beta diversity through '
                 'statistics and visualizations in the context of sample '
                 'metadata.'),
    short_description='Plugin for exploring community diversity.',
)

plugin.pipelines.register_function(
    function=q2_diversity.beta_phylogenetic,
    inputs={
        'table': FeatureTable[Frequency | RelativeFrequency | PresenceAbsence],
        'phylogeny': Phylogeny[Rooted]
    },
    parameters={
        'metric':
        Str % Choices(beta.METRICS['PHYLO']['IMPL']
                      | beta.METRICS['PHYLO']['UNIMPL']),
        'threads':
        Int % Range(1, None) | Str % Choices(['auto']),
        'variance_adjusted':
        Bool,
        'alpha':
        Float % Range(0, 1, inclusive_end=True),
        'bypass_tips':
        Bool
    },
    outputs=[('distance_matrix', DistanceMatrix)],
    input_descriptions={
        'table': ('The feature table containing the samples over which beta '
                  'diversity should be computed.'),
        'phylogeny': ('Phylogenetic tree containing tip identifiers that '
                      'correspond to the feature identifiers in the table. '
Example #16
    parameters={},
    outputs=[
        ('x', R)
    ],
    name="Double Bound Variable Method",
    description="Test reuse of variables"
)
del T, R


def bool_flag_swaps_output_method(a: EchoFormat, b: bool) -> EchoFormat:
    return a


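# Output type R depends on the value of the Bool parameter below:
# Choices(True) maps to C1[Foo]; Choices(False) maps to plain Foo.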
P, R = TypeMap({
    Choices(True): C1[Foo],
    Choices(False): Foo
})
dummy_plugin.methods.register_function(
    function=bool_flag_swaps_output_method,
    inputs={
        'a': Bar
    },
    parameters={
        'b': Bool % P
    },
    outputs=[
        ('x', R)
    ],
    name='Bool Flag Swaps Output Method',
    description='Test if a parameter can change output'
Example #17
                             'without replacement.')
    },
    output_descriptions={
        'rarefied_table': 'The resulting rarefied feature table.'
    },
    name='Rarefy table',
    description=("Subsample frequencies from all samples so that the sum of "
                 "frequencies in each sample is equal to sampling-depth."),
    citations=[citations['Weiss2017']])

plugin.methods.register_function(
    function=q2_feature_table.subsample,
    inputs={'table': FeatureTable[Frequency]},
    parameters={
        'subsampling_depth': Int % Range(1, None),
        'axis': Str % Choices(['sample', 'feature'])
    },
    outputs=[('sampled_table', FeatureTable[Frequency])],
    input_descriptions={'table': 'The feature table to be sampled.'},
    parameter_descriptions={
        'subsampling_depth': ('The total number of samples or features to be '
                              'randomly sampled. Samples or features that are '
                              'reduced to a zero sum will not be included in '
                              'the resulting table.'),
        'axis': ('The axis to sample over. If "sample" then samples will be '
                 'randomly selected to be retained. If "feature" then '
                 'a random set of features will be selected to be retained.')
    },
    output_descriptions={
        'sampled_table': 'The resulting subsampled feature table.'
    },
Example #18
                     'information.'),
    'replicate_handling': (
        'Choose how replicate samples are handled. If replicates are '
        'detected, "error" causes method to fail; "drop" will discard all '
        'replicated samples; "random" chooses one representative at random '
        'from among replicates.')
}



plugin.methods.register_function(
    function=beta_dispersion,
    inputs={'distance_matrix': DistanceMatrix},
    parameters={
        'metadata': Metadata,
        'palette': Str % Choices([
            'Set1', 'Set2', 'Set3', 'Pastel1', 'Pastel2', 'Paired', 'Accent',
            'Dark2', 'tab10', 'tab20', 'tab20b', 'tab20c', 'viridis', 'plasma',
            'inferno', 'magma', 'terrain', 'rainbow']),
        'metric': Str,
        'group_column': Str,
        'replicate_handling': miscellaneous_parameters['replicate_handling'],
        'state_column': miscellaneous_parameters['state_column'],
    },
    outputs=[('beta_volatility', SampleData[FirstDifferences])],  # UNKNOWN WHAT TO PUT INSTEAD OF SAMPLEDATA
    input_descriptions={
        'distance_matrix': 'Matrix of distances between pairs of samples.'},
    parameter_descriptions={
        'metadata': 'Metadata',
        'palette': 'Color palette to utilize for plotting.',
        'metric': ('The beta diversity metric used to compute the beta distance matrix.'
Example #19
                                        artifact_format=NexusDirFmt)

importlib.import_module('q2_beast.transformers')

NONZERO_INT = Int % Range(1, None)
NONNEGATIVE_INT = Int % Range(0, None)

plugin.methods.register_function(
    function=gtr_single_partition,
    inputs={'alignment': FeatureData[AlignedSequence]},
    parameters={
        'time': MetadataColumn[Numeric],
        'n_generations': NONZERO_INT,
        'sample_every': NONZERO_INT,
        'time_uncertainty': MetadataColumn[Numeric],
        'base_freq': Str % Choices("estimated", "empirical"),
        'site_gamma': Int % Range(0, 10, inclusive_end=True),
        'site_invariant': Bool,
        'clock': Str % Choices("ucln", "strict"),
        'coalescent_model':
        Str % Choices("skygrid", "constant", "exponential"),
        'skygrid_intervals': NONZERO_INT,
        'skygrid_duration': Float % Range(0, None, inclusive_start=False),
        'print_every': NONZERO_INT,
        'use_gpu': Bool,
        'n_threads': NONZERO_INT
    },
    outputs=[('chain', Chain[BEAST])],
    input_descriptions={
        'alignment': 'The alignment to construct a tree with.',
    },
Example #20
    description=(
        'Evaluate taxonomic classification accuracy by comparing one or more '
        'sets of true taxonomic labels to the predicted taxonomies for the '
        'same set(s) of features. Output an interactive line plot of '
        'classification accuracy for each pair of expected/observed '
        'taxonomies. ' + VOLATILITY_PLOT_XAXIS_INTERPRETATION),
    citations=[citations['bokulich2018optimizing'],
               citations['bokulich2017q2']]
)


plugin.methods.register_function(
    function=merge_taxa,
    inputs={'data': List[FeatureData[Taxonomy]]},
    parameters={
        'mode': Str % Choices(['len', 'lca', 'score', 'super', 'majority']),
        'rank_handle_regex': Str,
        'new_rank_handle': Str % Choices(list(_rank_handles.keys()))},
    outputs=[('merged_data', FeatureData[Taxonomy])],
    input_descriptions={
        'data': 'Two or more feature taxonomies to be merged.'},
    parameter_descriptions={
        'mode': 'How to merge feature taxonomies: "len" will select the '
                'taxonomy with the most elements (e.g., species level will '
                'beat genus level); "lca" will find the least common ancestor '
                'and report this consensus taxonomy; "score" will select the '
                'taxonomy with the highest score (e.g., confidence or '
                'consensus score). Note that "score" assumes that this score '
                'is always contained as the second column in a feature '
                'taxonomy dataframe. "majority" finds the LCA consensus while '
                'giving preference to majority labels. ' + super_lca_desc,
Example #21
plugin.register_semantic_types(ZodiacFolder)
plugin.register_semantic_type_to_format(ZodiacFolder,
                                        artifact_format=ZodiacDirFmt)

plugin.register_views(CSIDirFmt)
plugin.register_semantic_types(CSIFolder)
plugin.register_semantic_type_to_format(CSIFolder,
                                        artifact_format=CSIDirFmt)

plugin.register_views(TSVMoleculesFormat)
plugin.register_semantic_types(Molecules)
plugin.register_semantic_type_to_format(FeatureData[Molecules],
                                        artifact_format=TSVMoleculesFormat)

PARAMS = {
    'ionization_mode': Str % Choices(['positive', 'negative', 'auto']),
    'database': Str % Choices(['all', 'pubchem']),
    'sirius_path': Str,
    'profile': Str % Choices(['qtof', 'orbitrap', 'fticr']),
    'fingerid_db': Str % Choices(['all', 'pubchem', 'bio', 'kegg', 'hmdb']),
    'ppm_max': Int % Range(0, 30, inclusive_end=True),
    'n_jobs': Int % Range(1, None),
    'num_candidates': Int % Range(5, 100, inclusive_end=True),
    'tree_timeout': Int % Range(600, 3000, inclusive_end=True),
    'maxmz': Int % Range(100, 850, inclusive_end=True),
    'zodiac_threshold': Float % Range(0, 1, inclusive_end=True),
    'java_flags': Str
}

PARAMS_DESC = {
    'ionization_mode': 'Ionization mode for mass spectrometry',
Example #22
    name=('Interactively evaluate taxonomic classification accuracy.'),
    description=(
        'Evaluate taxonomic classification accuracy by comparing one or more '
        'sets of true taxonomic labels to the predicted taxonomies for the '
        'same set(s) of features. Output an interactive line plot of '
        'classification accuracy for each pair of expected/observed '
        'taxonomies. ' + VOLATILITY_PLOT_XAXIS_INTERPRETATION),
    citations=[
        citations['bokulich2018optimizing'], citations['bokulich2017q2']
    ])

plugin.methods.register_function(
    function=merge_taxa,
    inputs={'data': List[FeatureData[Taxonomy]]},
    parameters={
        'mode': Str % Choices(['len', 'lca', 'score', 'super', 'majority']),
        'rank_handle_regex': Str,
        'new_rank_handle': Str % Choices(list(_rank_handles.keys()))
    },
    outputs=[('merged_data', FeatureData[Taxonomy])],
    input_descriptions={
        'data': 'Two or more feature taxonomies to be merged.'
    },
    parameter_descriptions={
        'mode':
        'How to merge feature taxonomies: "len" will select the '
        'taxonomy with the most elements (e.g., species level will '
        'beat genus level); "lca" will find the least common ancestor '
        'and report this consensus taxonomy; "score" will select the '
        'taxonomy with the highest score (e.g., confidence or '
        'consensus score). Note that "score" assumes that this score '
Example #23
plugin.register_semantic_type_to_format(ReferenceSequence,
                                        DNASequencesDirectoryFormat)
plugin.register_semantic_type_to_format(SampleData[PileUp], PileUpFilesDirFmt)
plugin.register_semantic_type_to_format(SampleData[AlignmentMap],
                                        BAMFilesDirFmt)
plugin.register_semantic_type_to_format(SampleData[ConsensusSequences],
                                        FASTAFilesDirFmt)

importlib.import_module('q2_phylogenomics._transformers')

prinseq_input = {'demultiplexed_sequences': 'The sequences to be trimmed.'}
prinseq_output = {'trimmed_sequences': 'The resulting trimmed sequences.'}

prinseq_parameters = {
    'trim_qual_right': Int % Range(1, None),
    'trim_qual_type': Str % Choices(['min', 'mean', 'max', 'sum']),
    'trim_qual_window': Int % Range(1, None),
    'min_qual_mean': Int % Range(1, None),
    'min_len': Int % Range(1, None),
    'lc_method': Str % Choices(['dust', 'entropy']),
    'lc_threshold': Int % Range(0, 100),
    'derep': List[Str % Choices(list('12345'))]
}

prinseq_parameter_descriptions = {
    'trim_qual_right':
    'Trim sequence by quality score from the 3\'-end with '
    'this threshold score.',
    'trim_qual_type':
    'Type of quality score calculation to use. Allowed '
    'options are min, mean, max and sum.',
Example #24
                 'and reconstructs them into a single table with region-'
                 'normalized abundance counts.'),
    inputs={
        'regional_alignment': List[FeatureData[KmerAlignment]],
        'regional_table': List[FeatureTable[Frequency]],
        'database_map': FeatureData[SidleReconstruction],
        'database_summary': FeatureData[ReconstructionSummary],
    },
    outputs=[
        ('reconstructed_table', FeatureTable[Frequency]),
    ],
    parameters={
        'region': List[Str],
        'per_nucleotide_error': Float % Range(0, 1),
        'min_abund': Float % Range(0, 1),
        'region_normalize': Str % Choices('average', 'weighted', 'unweighted'),
        'min_counts': Int % Range(0, None),
        'block_size': Int,
        'n_workers': Int % Range(1, None),
        'client_address': Str,
        'debug': Bool,
    },
    input_descriptions={
        'regional_alignment': ('A mapping between the kmer names (in the kmer'
                               ' map) and the features (found in the regional'
                               ' table)'),
        'regional_table': ('A feature-table for each region, where the '
                           'features in the table correspond to the ASVs '
                           'which were aligned in the regional alignment '
                           'artifact'),
        'database_map': ('A map between the final kmer name and the
Example #25
        'pseudocount': 'The value to add to all counts in the feature table.'
    },
    output_descriptions={'composition_table': 'The resulting feature table.'},
    name='Add pseudocount to table',
    description="Increment all counts in table by pseudocount.")

_ancom_statistical_tests = q2_composition._ancom.statistical_tests()
_transform_functions = q2_composition._ancom.transform_functions()
_difference_functions = q2_composition._ancom.difference_functions()

plugin.visualizers.register_function(
    function=q2_composition.ancom,
    inputs={'table': FeatureTable[Composition]},
    parameters={
        'metadata': MetadataCategory,
        'statistical_test': Str % Choices(_ancom_statistical_tests),
        'transform_function': Str % Choices(_transform_functions),
        'difference_function': Str % Choices(_difference_functions)
    },
    input_descriptions={
        'table': 'The feature table to be used for ANCOM computation.'
    },
    parameter_descriptions={
        'metadata': ('The sample metadata category to test for '
                     'differential abundance across.'),
        'statistical_test': ('The test to be applied to detect '
                             'differential abundance across groups.'),
        'transform_function': ('The method applied to transform feature '
                               'values before generating volcano plots.'),
        'difference_function':
        'The method applied to visualize fold '
Example #26
plugin.methods.register_function(
    function=q2_phylogeny.midpoint_root,
    inputs={'tree': Phylogeny[Unrooted]},
    parameters={},
    outputs=[('rooted_tree', Phylogeny[Rooted])],
    input_descriptions={'tree': 'The phylogenetic tree to be rooted.'},
    parameter_descriptions={},
    output_descriptions={'rooted_tree': 'The rooted phylogenetic tree.'},
    name='Midpoint root an unrooted phylogenetic tree.',
    description=("Midpoint root an unrooted phylogenetic tree."))

plugin.methods.register_function(
    function=q2_phylogeny.fasttree,
    inputs={'alignment': FeatureData[AlignedSequence]},
    parameters={'n_threads': Int % Range(1, None) | Str % Choices(['auto'])},
    outputs=[('tree', Phylogeny[Unrooted])],
    input_descriptions={
        'alignment': ('Aligned sequences to be used for phylogenetic '
                      'reconstruction.')
    },
    parameter_descriptions={
        'n_threads':
        'The number of threads. Using more than one thread '
        'runs the non-deterministic variant of `FastTree` '
        '(`FastTreeMP`), and may result in a different tree than '
        'single-threading. See '
        'http://www.microbesonline.org/fasttree/#OpenMP for '
        'details. (Use `auto` to automatically use all available '
        'cores)'
    },
Example #27
    function=params_only_method,
    inputs={},
    parameters={
        'name': Str,
        'age': Int
    },
    outputs=[('out', Mapping)],
    name='Parameters only method',
    description='This method only accepts parameters.',
)

dummy_plugin.methods.register_function(
    function=unioned_primitives,
    inputs={},
    parameters={
        'foo': Int % Range(1, None) | Str % Choices(['auto_foo']),
        'bar': Int % Range(1, None) | Str % Choices(['auto_bar']),
    },
    outputs=[('out', Mapping)],
    name='Unioned primitive parameter',
    description='This method has a unioned primitive parameter')

dummy_plugin.methods.register_function(
    function=no_input_method,
    inputs={},
    parameters={},
    outputs=[('out', Mapping)],
    name='No input method',
    description='This method does not accept any type of input.')

dummy_plugin.methods.register_function(
Example #28
        raise RuntimeError("No matches found")
    return ff


plugin.methods.register_function(
    function=extract_reads,
    inputs={'sequences': FeatureData[Sequence]},
    parameters={'trunc_len': Int,
                'trim_left': Int,
                'f_primer': Str,
                'r_primer': Str,
                'identity': Float,
                'min_length': Int % Range(0, None),
                'max_length': Int % Range(0, None),
                'n_jobs': Int % Range(1, None),
                'batch_size': Int % Range(1, None) | Str % Choices(['auto'])},
    outputs=[('reads', FeatureData[Sequence])],
    name='Extract reads from reference',
    description='Extract sequencing-like reads from a reference database.',
    parameter_descriptions={'f_primer': 'forward primer sequence',
                            'r_primer': 'reverse primer sequence',
                            'trunc_len': 'read is cut to trunc_len if '
                                         'trunc_len is positive. Applied '
                                         'before trim_left.',
                            'trim_left': 'trim_left nucleotides are removed '
                                         'from the 5\' end if trim_left is '
                                         'positive. Applied after trunc_len.',
                            'identity': 'minimum combined primer match '
                                        'identity threshold.',
                            'min_length': 'Minimum amplicon length. Shorter '
                                          'amplicons are discarded. Applied '
Example #29
                                           'a': T,
                                           'b': T,
                                           'extra': Foo
                                       },
                                       parameters={},
                                       outputs=[('x', R)],
                                       name="Double Bound Variable Method",
                                       description="Test reuse of variables")
del T, R


def bool_flag_swaps_output_method(a: EchoFormat, b: bool) -> EchoFormat:
    return a


P, R = TypeMap({Choices(True): C1[Foo], Choices(False): Foo})
dummy_plugin.methods.register_function(
    function=bool_flag_swaps_output_method,
    inputs={'a': Bar},
    parameters={'b': Bool % P},
    outputs=[('x', R)],
    name='Bool Flag Swaps Output Method',
    description='Test if a parameter can change output')
del P, R


def predicates_preserved_method(a: EchoFormat) -> EchoFormat:
    return a


P = TypeMatch(
Example #30
citations = Citations.load('citations.bib', package='q2_alignment')
plugin = Plugin(
    name='alignment',
    version=q2_alignment.__version__,
    website='https://github.com/qiime2/q2-alignment',
    package='q2_alignment',
    description=('This QIIME 2 plugin provides support for generating '
                 'and manipulating sequence alignments.'),
    short_description='Plugin for generating and manipulating alignments.')

plugin.methods.register_function(
    function=q2_alignment.mafft,
    inputs={'sequences': FeatureData[Sequence]},
    parameters={
        'n_threads': Int % Range(1, None) | Str % Choices(['auto']),
        'parttree': Bool
    },
    outputs=[('alignment', FeatureData[AlignedSequence])],
    input_descriptions={'sequences': 'The sequences to be aligned.'},
    parameter_descriptions={
        'n_threads':
        'The number of threads. (Use `auto` to automatically use '
        'all available cores)',
        'parttree':
        'This flag is required if the number of sequences being '
        'aligned are larger than 1000000. Disabled by default'
    },
    output_descriptions={'alignment': 'The aligned sequences.'},
    name='De novo multiple sequence alignment with MAFFT',
    description=("Perform de novo multiple sequence alignment using MAFFT."),