def _generic_plot(output_dir: str, master: skbio.OrdinationResults,
                  metadata: qiime2.Metadata,
                  other_pcoa: skbio.OrdinationResults, plot_name,
                  custom_axes: str = None):

    mf = metadata.to_dataframe()

    if other_pcoa is None:
        procrustes = None
    else:
        procrustes = [other_pcoa]

    viz = Emperor(master, mf, procrustes=procrustes, remote='.')

    if custom_axes is not None:
        viz.custom_axes = custom_axes

    if other_pcoa:
        viz.procrustes_names = ['reference', 'other']

    html = viz.make_emperor(standalone=True)
    viz.copy_support_files(output_dir)
    with open(os.path.join(output_dir, 'emperor.html'), 'w') as fh:
        fh.write(html)

    index = os.path.join(TEMPLATES, 'index.html')
    q2templates.render(index, output_dir, context={'plot_name': plot_name})
def _generic_plot(output_dir: str, master: skbio.OrdinationResults,
                  metadata: qiime2.Metadata,
                  other_pcoa: skbio.OrdinationResults, plot_name,
                  custom_axes: str = None,
                  feature_metadata: qiime2.Metadata = None):

    mf = metadata.to_dataframe()

    if feature_metadata is not None:
        feature_metadata = feature_metadata.to_dataframe()

    if other_pcoa is None:
        procrustes = None
    else:
        procrustes = [other_pcoa]

    viz = Emperor(master, mf, feature_mapping_file=feature_metadata,
                  procrustes=procrustes, remote='.')

    if custom_axes is not None:
        viz.custom_axes = custom_axes

    if other_pcoa:
        viz.procrustes_names = ['reference', 'other']

    html = viz.make_emperor(standalone=True)
    viz.copy_support_files(output_dir)
    with open(os.path.join(output_dir, 'emperor.html'), 'w') as fh:
        fh.write(html)

    index = os.path.join(TEMPLATES, 'index.html')
    q2templates.render(index, output_dir, context={'plot_name': plot_name})
def plot(output_dir: str, pcoa: skbio.OrdinationResults,
         metadata: qiime2.Metadata, custom_axis: str = None) -> None:

    mf = metadata.to_dataframe()
    viz = Emperor(pcoa, mf, remote='.')

    if custom_axis is not None:
        # put custom_axis inside a list to work around the type system not
        # supporting lists of types
        html = viz.make_emperor(standalone=True, custom_axes=[custom_axis])
    else:
        html = viz.make_emperor(standalone=True)

    viz.copy_support_files(output_dir)
    with open(os.path.join(output_dir, 'emperor.html'), 'w') as fh:
        fh.write(html)

    index = os.path.join(TEMPLATES, 'index.html')
    q2templates.render(index, output_dir)
def _generate_ordination_results_summary(files, metadata, out_dir):
    # Magic number [0] -> there is only one plain text file and it is the
    # ordination results
    ord_res = OrdinationResults.read(files['plain_text'][0])
    md_df = pd.DataFrame.from_dict(metadata, orient='index')

    emp = Emperor(ord_res, md_df, remote="emperor_support_files")

    html_summary_fp = join(out_dir, 'index.html')
    esf_dp = join(out_dir, 'emperor_support_files')
    makedirs(esf_dp)
    with open(html_summary_fp, 'w') as f:
        f.write(emp.make_emperor(standalone=True))
    emp.copy_support_files(esf_dp)

    return html_summary_fp, esf_dp
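# Example (hedged): a sketch of calling the summary helper above. The shapes of
# the `files` and `metadata` arguments are assumptions inferred from the
# "Magic number [0]" comment and from pd.DataFrame.from_dict(..., orient='index');
# the paths and column names are placeholders.
def _example_ordination_summary():
    files = {'plain_text': ['ordination.txt']}           # single ordination results file
    metadata = {'sample-1': {'treatment': 'control'},    # per-sample metadata keyed by ID
                'sample-2': {'treatment': 'case'}}
    return _generate_ordination_results_summary(files, metadata, 'summary-output')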
def plot(output_dir: str, sample_metadata: qiime.Metadata,
         pcoa: skbio.OrdinationResults) -> None:

    mf = sample_metadata.to_dataframe()
    output = join(output_dir, 'emperor-required-resources/')
    viz = Emperor(pcoa, mf, remote=output)

    with open(join(output_dir, 'index.html'), 'w') as f:
        # correct the path
        html = viz.make_emperor(standalone=True)
        viz.copy_support_files(output_dir)
        f.write(html)

    return None
def generic_plot(output_dir: str, master: skbio.OrdinationResults,
                 metadata: qiime2.Metadata,
                 other_pcoa: skbio.OrdinationResults, plot_name: str,
                 info: str = None, custom_axes: str = None,
                 settings: dict = None,
                 ignore_missing_samples: bool = False,
                 feature_metadata: qiime2.Metadata = None):

    mf = metadata.to_dataframe()

    if feature_metadata is not None:
        feature_metadata = feature_metadata.to_dataframe()

    if other_pcoa is None:
        procrustes = None
    else:
        procrustes = [other_pcoa]

    viz = Emperor(master, mf, feature_mapping_file=feature_metadata,
                  ignore_missing_samples=ignore_missing_samples,
                  procrustes=procrustes, remote='.')

    if custom_axes is not None:
        viz.custom_axes = custom_axes

    if other_pcoa:
        viz.procrustes_names = ['reference', 'other']

    viz.info = info
    viz.settings = settings

    html = viz.make_emperor(standalone=True)
    viz.copy_support_files(output_dir)
    with open(os.path.join(output_dir, 'emperor.html'), 'w') as fh:
        fh.write(html)

    index = os.path.join(TEMPLATES, 'index.html')
    q2templates.render(index, output_dir, context={'plot_name': plot_name})
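# Example (hedged): a minimal sketch of how generic_plot might be invoked. The
# file paths are hypothetical placeholders, and the inputs are assumed to be
# loaded with skbio.OrdinationResults.read and qiime2.Metadata.load (in QIIME 2
# the output directory is created by the framework before the visualizer runs).
def _example_generic_plot():
    import skbio
    import qiime2
    ordination = skbio.OrdinationResults.read('ordination.txt')  # placeholder path
    sample_md = qiime2.Metadata.load('sample-metadata.tsv')      # placeholder path
    generic_plot('emperor-plot', master=ordination, metadata=sample_md,
                 other_pcoa=None, plot_name='emperor')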
def emperor_output(sklearn_output, full_file_list, eigenvalues,
                   percent_variance, output_file, new_files=None):
    print("Made it to Emperor Function!")

    # read in the sklearn output and format it for Emperor's intake
    eigvals = pd.Series(data=eigenvalues)
    samples = pd.DataFrame(data=sklearn_output, index=full_file_list)
    p_explained = pd.Series(data=percent_variance)
    ores = OrdinationResults(long_method_name="principal component analysis",
                             short_method_name="pcoa",
                             eigvals=eigvals, samples=samples,
                             proportion_explained=p_explained)

    # this first part is for the global metadata file
    global_metadata = pd.read_csv(config.PATH_TO_ORIGINAL_MAPPING_FILE,
                                  sep="\t")
    global_metadata_headers = global_metadata.columns.tolist()
    global_metadata.rename(columns={'filename': 'SampleID'}, inplace=True)
    global_metadata["type"] = "Global Data"
    global_metadata.set_index("SampleID", inplace=True)
    common = global_metadata

    # this part is for the user-uploaded metadata file
    if new_files is not None:
        metadata_uploaded = pd.DataFrame({"SampleID": new_files,
                                          "type": ["Your Data"] * len(new_files)})
        for item in global_metadata_headers:
            metadata_uploaded[item] = ["Your Data"] * len(new_files)
        metadata_uploaded.set_index("SampleID", inplace=True)
        common = pd.concat([global_metadata, metadata_uploaded])

    # the metadata and the samples in the ordination must be aligned BEFORE
    # they are fed to Emperor, otherwise it will not output results
    final_metadata, unused = common.align(samples, join="right", axis=0)

    # call Emperor to output the plot
    emp = Emperor(ores, final_metadata, remote=True)

    # create an output directory
    os.makedirs(output_file, exist_ok=True)
    with open(os.path.join(output_file, 'index.html'), 'w') as f:
        f.write(emp.make_emperor(standalone=True))
    emp.copy_support_files(output_file)
def emperor_output(sklearn_output, full_file_list, eigenvalues,
                   percent_variance, output_file, new_files=[]):
    eigvals = pd.Series(data=eigenvalues)
    samples = pd.DataFrame(data=sklearn_output, index=full_file_list)
    samples.index.rename("SampleID", inplace=True)
    p_explained = pd.Series(data=percent_variance)
    ores = OrdinationResults(long_method_name="principal component analysis",
                             short_method_name="pcoa",
                             eigvals=eigvals, samples=samples,
                             proportion_explained=p_explained)

    # read in all sample metadata
    df = pd.read_table(config.PATH_TO_ORIGINAL_MAPPING_FILE)
    df.rename(columns={"filename": "SampleID"}, inplace=True)
    df.set_index("SampleID", inplace=True)

    # handle the case in which the PCA is a projection
    if len(new_files) != 0:
        df["Type"] = "Global"
        new_meta = pd.DataFrame({"SampleID": new_files, "Type": "Your Data"})
        new_meta.set_index("SampleID", inplace=True)
        df = pd.concat([df, new_meta], axis=0, join="outer")

    final_metadata, unused = df.align(samples, join="right", axis=0)

    # call Emperor to output the plot
    emp = Emperor(ores, final_metadata, remote=True)

    # create an output directory
    os.makedirs(output_file, exist_ok=True)
    with open(os.path.join(output_file, 'index.html'), 'w') as f:
        f.write(emp.make_emperor(standalone=True))
    emp.copy_support_files(output_file)
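# Example (hedged): what the align step above does, shown on toy data. With
# join="right" along axis=0, the metadata is reindexed to exactly the sample
# IDs present in the ordination's samples DataFrame; samples that have no
# metadata come back as NaN rows instead of silently falling out of register.
def _example_align_metadata():
    import pandas as pd
    meta = pd.DataFrame({"Type": ["Global", "Global"]}, index=["s1", "s3"])
    coords = pd.DataFrame({0: [0.1, -0.2]}, index=["s1", "s2"])
    aligned, _ = meta.align(coords, join="right", axis=0)
    return list(aligned.index)  # ['s1', 's2']; the unmatched "s3" row is dropped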
def create_emperor_visual(args, pcfile):
    """
    Sample .pc file

    # Eigvals 4
    # 0.2705559825337763 0.07359266496720843 0.02997793703738496 0.0
    #
    # Proportion explained 4
    # 0.7231669539538659 0.19670525434062255 0.0801277917055116 0.0
    #
    # Species 0 0
    #
    # Site 4 4
    # ICM_LCY_Bv6--LCY_0001_2003_05_11 -0.04067063044757823 -0.09380781760926289 0.13680474645584195 0.0
    # ICM_LCY_Bv6--LCY_0003_2003_05_04 -0.11521436634022217 -0.15957409396683217 -0.10315005726535573 0.0
    # ICM_LCY_Bv6--LCY_0005_2003_05_16 0.4268532792747924 0.06657577342833808 -0.02212569426459717 0.0
    # ICM_LCY_Bv6--LCY_0007_2003_05_04 -0.2709682824869916 0.18680613814775715 -0.011528994925888972 0.0
    #
    # Biplot 0 0
    #
    # Site constraints 0 0
    """
    # print PCoA_result
    from emperor import Emperor
    from skbio import OrdinationResults

    # load metadata
    mf = load_mf(args.map_fp)

    # must read from file (scikit-bio version 0.5.1,
    # http://scikit-bio.org/docs/0.5.1/generated/generated/skbio.stats.ordination.OrdinationResults.html)
    res = OrdinationResults.read(pcfile)

    emp = Emperor(res, mf)

    # pcoa_outdir = os.path.join(args.basedir, 'views', 'tmp', args.prefix + '_pcoa3d')
    pcoa_outdir = os.path.join(args.basedir, args.prefix + '_pcoa3d')
    print('OUT?', pcoa_outdir, args.basedir)
    os.makedirs(pcoa_outdir, mode=0o777, exist_ok=True)
    with open(os.path.join(pcoa_outdir, 'index.html'), 'w') as f:
        f.write(emp.make_emperor(standalone=True))
    emp.copy_support_files(pcoa_outdir)
def create_emperor_visual(args, pcfile):
    """
    Sample .pc file

    # Eigvals 4
    # 0.2705559825337763 0.07359266496720843 0.02997793703738496 0.0
    #
    # Proportion explained 4
    # 0.7231669539538659 0.19670525434062255 0.0801277917055116 0.0
    #
    # Species 0 0
    #
    # Site 4 4
    # ICM_LCY_Bv6--LCY_0001_2003_05_11 -0.04067063044757823 -0.09380781760926289 0.13680474645584195 0.0
    # ICM_LCY_Bv6--LCY_0003_2003_05_04 -0.11521436634022217 -0.15957409396683217 -0.10315005726535573 0.0
    # ICM_LCY_Bv6--LCY_0005_2003_05_16 0.4268532792747924 0.06657577342833808 -0.02212569426459717 0.0
    # ICM_LCY_Bv6--LCY_0007_2003_05_04 -0.2709682824869916 0.18680613814775715 -0.011528994925888972 0.0
    #
    # Biplot 0 0
    #
    # Site constraints 0 0
    """
    # print PCoA_result
    from emperor import Emperor
    from skbio import OrdinationResults

    # load metadata
    mf = load_mf(args.map_fp)

    # must read from file (scikit-bio version 0.5.1,
    # http://scikit-bio.org/docs/0.5.1/generated/generated/skbio.stats.ordination.OrdinationResults.html)
    res = OrdinationResults.read(pcfile)

    emp = Emperor(res, mf)

    pcoa_outdir = os.path.join(args.basedir, 'views', 'tmp',
                               args.prefix + '_pcoa3d')
    print('OUT?', pcoa_outdir, args.basedir)
    os.makedirs(pcoa_outdir, exist_ok=True)
    with open(os.path.join(pcoa_outdir, 'index.html'), 'w') as f:
        f.write(emp.make_emperor(standalone=True))
    emp.copy_support_files(pcoa_outdir)
# The imports and the example inputs (N, coords_ids, categories) below are
# assumptions added to make the snippet self-contained; the original script
# defines them elsewhere. The import location of get_emperor_support_files_dir
# is also assumed.
from random import sample, choice
from string import ascii_letters

import numpy as np
import pandas as pd
from emperor import Emperor
from emperor.util import get_emperor_support_files_dir
from skbio import OrdinationResults

N = 10                                            # assumed number of samples
coords_ids = ['Sample.%d' % i for i in range(N)]  # assumed sample identifiers
categories = ['1990', '1991', '1992']             # assumed DOB categories

coords = (np.random.randn(N, 10) * 1000).tolist()
pct_var = pd.Series(1 / np.exp(np.arange(10)))
pct_var = pct_var / pct_var.sum()

md_headers = ['SampleID', 'DOB', 'Strings']
metadata = []
for _id in coords_ids:
    metadata.append([_id, ''.join(sample(set(categories), 1)),
                     ''.join(choice(ascii_letters) for x in range(10))])

samples = pd.DataFrame(index=coords_ids, data=coords)
mf = pd.DataFrame(data=metadata, columns=md_headers)
mf.set_index('SampleID', inplace=True)

minerals = ['rhodium', 'platinum', 'gold', 'ruthenium']
mf['subject'] = np.random.randint(low=0, high=len(minerals), size=N)
mf['subject'] = mf['subject'].apply(lambda x: minerals[x])

res = OrdinationResults(short_method_name='PC',
                        long_method_name='Principal Coordinates Analysis',
                        eigvals=pct_var, samples=samples,
                        proportion_explained=pct_var)

viz = Emperor(res, mf, remote=get_emperor_support_files_dir())

with open('new-emperor.html', 'w') as f:
    f.write(viz.make_emperor(standalone=True))
def main(arguments):

    parser = argparse.ArgumentParser(
        description=__doc__,
        formatter_class=argparse.RawDescriptionHelpFormatter,
        epilog=textwrap.dedent('''additional information:
        output of taxsum, betadiv and pcoa are written in STDOUT

        If you use microbiomeutils in your work, please cite:
        Tremblay, Julien
        microbiomeutils 0.9 : Microbiome utilities
        https://github.com/jtremblay/microbiomeutils
        Thank you.'''))

    subparsers = parser.add_subparsers(title='subcommands',
                                       description='valid subcommands',
                                       help='additional help',
                                       dest="command")

    parser_bd = subparsers.add_parser('betadiv')
    parser_bd.add_argument('-i', '--infile-feature-table', help="Input file",
                           type=argparse.FileType('r'))
    parser_bd.add_argument("-m", "--metric",
                           help="Diversity metric (default: bray-curtis)",
                           choices=["bray-curtis", "weighted-unifrac"],
                           default="bray-curtis")
    parser_bd.add_argument("-t", "--infile-tree",
                           help="Tree file (for weighted UniFrac)",
                           type=argparse.FileType('r'))
    # parser_bd.set_defaults(func=betadiv)

    parser_ts = subparsers.add_parser('taxsum')
    parser_ts.add_argument('-i', '--infile-feature-table', help="Input file",
                           type=argparse.FileType('r'))
    parser_ts.add_argument("-t", "--sumtype",
                           help="Summary type (default: absolute)",
                           choices=["absolute", "relative"],
                           default="absolute")
    parser_ts.add_argument("-l", "--level", help="Level <int> 1 to 7",
                           choices=["1", "2", "3", "4", "5", "6", "7", "8"],
                           default="3")
    # parser_bd.set_defaults(func=taxsum)

    parser_ts = subparsers.add_parser('pcoa')
    parser_ts.add_argument('-i', '--infile-distance-matrix',
                           help="Input file", type=argparse.FileType('r'))

    parser_ts = subparsers.add_parser('emperor')
    parser_ts.add_argument('-i', '--infile-coords', help="Input file",
                           type=argparse.FileType('r'))
    parser_ts.add_argument('-m', '--mapping-file', help="Mapping file",
                           type=argparse.FileType('r'))
    parser_ts.add_argument('-o', '--outdir', help="Output directory")

    args = parser.parse_args(arguments)

    if args.command == 'betadiv':
        infile_feature_table = os.path.abspath(args.infile_feature_table.name)
        sys.stderr.write("[betadiv]\n")
        if args.infile_tree is None and args.metric == "weighted-unifrac":
            raise ValueError(
                'weighted-unifrac needs a tree supplied. --infile-tree needed')
        if args.metric == "bray-curtis":
            betadiv(infile_feature_table, args.metric)
        else:
            betadiv(infile_feature_table, args.metric, args.infile_tree.name)

    elif args.command == 'taxsum':
        infile_feature_table = os.path.abspath(args.infile_feature_table.name)
        sys.stderr.write("[taxsum]\n")
        taxsum(infile_feature_table, args.sumtype, args.level)

    elif args.command == 'pcoa':
        sys.stderr.write("[pcoa]\n")
        infile_distance_matrix = os.path.abspath(
            args.infile_distance_matrix.name)
        ord_res = do_pcoa(infile_distance_matrix)

    elif args.command == 'emperor':
        sys.stderr.write("[emperor]\n")
        metadata = pd.read_csv(args.mapping_file, sep='\t',
                               index_col='#SampleID',
                               dtype={'#SampleID': 'string'})
        ordination = OrdinationResults.read(args.infile_coords)

        # the remote argument refers to where the support files will be
        # located relative to the plot itself, i.e. index.html
        emp = Emperor(ordination, metadata, remote='.')

        output_folder = args.outdir  # new folder where data will be saved

        # create an output directory
        os.makedirs(output_folder, exist_ok=True)

        with open(os.path.join(output_folder, 'index.html'), 'w') as f:
            f.write(emp.make_emperor(standalone=True))
        emp.copy_support_files(output_folder)
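# Example (hedged): driving the "emperor" subcommand programmatically; the file
# names are placeholders for a coordinates file and a tab-separated mapping
# file that contains a '#SampleID' column.
def _example_emperor_subcommand():
    main(['emperor',
          '-i', 'coords.txt',
          '-m', 'mapping_file.tsv',
          '-o', 'emperor-plot'])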
class Empress():

    def __init__(self, tree, table, sample_metadata, feature_metadata=None,
                 ordination=None, ignore_missing_samples=False,
                 filter_missing_features=False, resource_path=None,
                 filter_unobserved_features_from_phylogeny=True):
        """Visualize a phylogenetic tree

        Use this object to interactively display a phylogenetic tree using the
        Empress GUI.

        Parameters
        ----------
        tree: bp.Tree
            The phylogenetic tree to visualize.
        table: pd.DataFrame
            The matrix to visualize paired with the phylogenetic tree.
        sample_metadata: pd.DataFrame
            DataFrame object with the metadata associated with the samples in
            the ``ordination`` object; it should have an index set, and the
            index should match the identifiers in the ``ordination`` object.
        feature_metadata: pd.DataFrame, optional
            DataFrame object with the metadata associated with the names of
            tips and/or internal nodes in the ``tree`` object; it should have
            an index set, and the index should match at least one of these
            nodes' names.
        ordination: skbio.OrdinationResults, optional
            Object containing the computed values for an ordination method in
            scikit-bio. Currently supports skbio.stats.ordination.PCoA and
            skbio.stats.ordination.RDA results.
        ignore_missing_samples: bool, optional (default False)
            If True, pads missing samples (i.e. samples in the table but not
            the metadata) with placeholder metadata. If False, raises a
            DataMatchingError if any such samples exist. (Note that in either
            case, samples in the metadata but not in the table are filtered
            out; and if no samples are shared between the table and metadata,
            a DataMatchingError is raised regardless.) This is analogous to
            the ignore_missing_samples flag in Emperor.
        filter_missing_features: bool, optional (default False)
            If True, filters features from the table that aren't present as
            tips in the tree. If False, raises a DataMatchingError if any such
            features exist. (Note that in either case, features in the tree
            but not in the table are preserved.)
        resource_path: str, optional
            Load the resources from a user-specified remote location. If set
            to None, resources are loaded from the current directory.
        filter_unobserved_features_from_phylogeny: bool, optional
            If True, filters features from the phylogeny that aren't present
            as features in the feature table. Otherwise, the phylogeny is not
            filtered.

        Attributes
        ----------
        tree:
            Phylogenetic tree.
        table:
            Contingency matrix for the phylogeny.
        samples:
            Sample metadata.
        features:
            Feature metadata.
        ordination:
            Ordination matrix to visualize simultaneously with the tree.
        base_url:
            Base path to the remote resources.
        """
        self.tree = tree
        self.table = table
        self.samples = sample_metadata.copy()

        if feature_metadata is not None:
            self.features = feature_metadata.copy()
        else:
            self.features = None

        self.ordination = ordination

        self.base_url = resource_path
        if self.base_url is None:
            self.base_url = './'

        self._validate_and_match_data(
            ignore_missing_samples,
            filter_missing_features,
            filter_unobserved_features_from_phylogeny)

        if self.ordination is not None:
            # Note that tip-level metadata is the only "feature metadata" we
            # send to Emperor, because internal nodes in the tree should not
            # correspond to features in the table (and thus to arrows in a
            # biplot).
            self._emperor = Emperor(
                self.ordination, mapping_file=self.samples,
                feature_mapping_file=self.tip_md,
                ignore_missing_samples=ignore_missing_samples,
                remote='./emperor-resources')
        else:
            self._emperor = None

    def _validate_and_match_data(self, ignore_missing_samples,
                                 filter_missing_features,
                                 filter_unobserved_features_from_phylogeny):
        # remove unobserved features from the phylogeny
        if filter_unobserved_features_from_phylogeny:
            self.tree = self.tree.shear(set(self.table.columns))

        # extract balanced parenthesis
        self._bp_tree = list(self.tree.B)

        self.tree = Tree.from_tree(to_skbio_treenode(self.tree))
        fill_missing_node_names(self.tree)

        # Note that the feature_table we get from QIIME 2 (as an argument to
        # this function) is set up such that the index describes sample IDs
        # and the columns describe feature IDs. We transpose this table before
        # sending it to tools.match_inputs() and keep using the transposed
        # table for the rest of this visualizer.
        self.table, self.samples, self.tip_md, self.int_md = match_inputs(
            self.tree, self.table.T, self.samples, self.features,
            ignore_missing_samples, filter_missing_features)

    def copy_support_files(self, target=None):
        """Copies the support files to a target directory

        If an ordination is included, Emperor's support files will also be
        copied over (in a directory named emperor-resources).

        Parameters
        ----------
        target : str
            The path where resources should be copied to. By default it
            copies the files to ``self.base_url``.
        """
        if target is None:
            target = self.base_url

        # copy the required resources
        copytree(SUPPORT_FILES, os.path.join(target, 'support_files'))

        if self._emperor is not None:
            self._emperor.copy_support_files(
                os.path.join(target, 'emperor-resources'))

    def __str__(self):
        return self.make_empress()

    def make_empress(self):
        """Build an empress plot

        Returns
        -------
        str
            Formatted empress plot.

        Notes
        -----
        Once you generate the plot (and write it to an HTML file in a given
        directory) you will need to copy the support files (the JS/CSS/etc.
        code needed to view the visualization) to the same directory by
        calling the ``copy_support_files`` method.

        See Also
        --------
        empress.core.Empress.copy_support_files
        """
        main_template = self._get_template()

        # _process_data does a lot of munging to the coordinates data and
        # _to_dict puts the data into a dictionary-like object for consumption
        data = self._to_dict()

        plot = main_template.render(data)

        return plot

    def _to_dict(self):
        """Convert processed data into a dictionary

        Returns
        -------
        dict
            A dictionary describing the plots contained in the ordination
            object and the sample + feature metadata.
        """
        # Compute coordinates resulting from layout algorithm(s)
        # TODO: figure out implications of screen size
        layout_to_coordsuffix, default_layout = self.tree.coords(4020, 4020)

        tree_data = {}
        names_to_keys = {}
        for i, node in enumerate(self.tree.postorder(include_self=True), 1):
            tree_data[i] = {
                'name': node.name,
                'color': [0.75, 0.75, 0.75],
                'sampVal': 1,
                'visible': True,
                'single_samp': False
            }
            # Add coordinate data from all layouts for this node
            for layoutsuffix in layout_to_coordsuffix.values():
                xcoord = "x" + layoutsuffix
                ycoord = "y" + layoutsuffix
                tree_data[i][xcoord] = getattr(node, xcoord)
                tree_data[i][ycoord] = getattr(node, ycoord)
            # Hack: it isn't mentioned above, but we need start pos info for
            # circular layout. The start pos for the other layouts is the
            # parent xy coordinates, so we only need to specify the start for
            # circular layout.
tree_data[i]["xc0"] = node.xc0 tree_data[i]["yc0"] = node.yc0 # Also add vertical bar coordinate info for the rectangular layout, # and start point & arc coordinate info for the circular layout if not node.is_tip(): tree_data[i]["highestchildyr"] = node.highest_child_yr tree_data[i]["lowestchildyr"] = node.lowest_child_yr if not node.is_root(): tree_data[i]["arcx0"] = node.arcx0 tree_data[i]["arcy0"] = node.arcy0 tree_data[i]["arcstartangle"] = node.highest_child_clangle tree_data[i]["arcendangle"] = node.lowest_child_clangle if node.name in names_to_keys: names_to_keys[node.name].append(i) else: names_to_keys[node.name] = [i] names = [] for node in self.tree.preorder(include_self=True): names.append(node.name) # Convert sample metadata to a JSON-esque format sample_data = self.samples.to_dict(orient='index') # Convert feature metadata, similarly to how we handle sample metadata. # If the user passed in feature metadata, self.features won't be None. # (We don't actually use any data from self.features at this point in # the program since it hasn't had taxonomy splitting / matching / etc. # done.) if self.features is not None: # If we're in this block, we know that self.tip_md and self.int_md # are both DataFrames. They have identical columns, so we can just # use self.tip_md.columns when setting feature_metadata_columns. # (We don't use self.features.columns because stuff like taxonomy # splitting will have changed the columns from what they initially # were in some cases.) feature_metadata_columns = list(self.tip_md.columns) # Calling .to_dict() on an empty DataFrame just gives you {}, so # this is safe even if there is no tip or internal node metadata. # (...At least one of these DFs should be populated, though, since # none of the feature IDs matching up would have caused an error.) tip_md_json = self.tip_md.to_dict(orient='index') int_md_json = self.int_md.to_dict(orient='index') else: feature_metadata_columns = [] tip_md_json = {} int_md_json = {} # TODO: Empress is currently storing all metadata as strings. This is # memory intensive and won't scale well. We should convert all numeric # data/compress metadata. # This is used in biom-table. Currently this is only used to ignore # null data (i.e. NaN and "unknown") and also determines sorting order. # The original intent is to signal what columns are # discrete/continuous. 
        # type of sample metadata (n - number, o - object)
        sample_data_type = self.samples.dtypes.to_dict()
        sample_data_type = {
            k: 'n' if pd.api.types.is_numeric_dtype(v) else 'o'
            for k, v in sample_data_type.items()
        }

        # create a mapping of observation ids and the samples that contain
        # them
        obs_data = {}
        feature_table = (self.table > 0)
        for _, series in feature_table.iteritems():
            sample_ids = series[series].index.tolist()
            obs_data[series.name] = sample_ids

        data_to_render = {
            'base_url': './support_files',
            'tree': self._bp_tree,
            'tree_data': tree_data,
            'names_to_keys': names_to_keys,
            'sample_data': sample_data,
            'sample_data_type': sample_data_type,
            'tip_metadata': tip_md_json,
            'int_metadata': int_md_json,
            'feature_metadata_columns': feature_metadata_columns,
            'obs_data': obs_data,
            'names': names,
            'layout_to_coordsuffix': layout_to_coordsuffix,
            'default_layout': default_layout,
            'emperor_div': '',
            'emperor_require_logic': '',
            'emperor_style': '',
            'emperor_base_dependencies': '',
            'emperor_classes': ''
        }

        if self._emperor is not None:
            data_to_render.update(self._scavenge_emperor())

        return data_to_render

    def _get_template(self, standalone=False):
        """Get the jinja template object

        Parameters
        ----------
        standalone: bool, optional
            Whether or not the generated plot will load resources locally
            (``True``), or from a specified URL (``False``).

        Returns
        -------
        jinja2.Template
            Template where the plot is created.
        """
        # based on: http://stackoverflow.com/a/6196098
        env = Environment(loader=FileSystemLoader(TEMPLATES))
        return env.get_template('empress-template.html')

    def _scavenge_emperor(self):
        # can't make this 50vw because one of the plot containers has some
        # padding that makes the divs stack on top of each other
        self._emperor.width = '48vw'
        self._emperor.height = '100vh; float: right'

        # make the background white so it matches Empress
        self._emperor.set_background_color('white')
        self._emperor.set_axes(color='black')

        html = self._emperor.make_emperor(standalone=True)
        html = html.split('\n')

        # The following line references will be replaced with API calls to the
        # Emperor object, however those are not implemented yet
        emperor_base_dependencies = html[6]

        # line 14 is where the CSS includes start, but it is surrounded by
        # unnecessary tags so we strip those out
        style = '\n'.join([line.strip().replace("'", '').replace(',', '')
                           for line in html[14:20]])

        # main divs for emperor
        emperor_div = '\n'.join(html[39:44])

        # main js script for emperor
        emperor_require_logic = '\n'.join(html[45:-3])

        # once everything is loaded replace the callback tag for custom JS
        with open(SELECTION_CALLBACK_PATH) as f:
            selection_callback = f.read()

        emperor_require_logic = emperor_require_logic.replace(
            '/*__select_callback__*/', selection_callback)

        emperor_data = {
            'emperor_div': emperor_div,
            'emperor_require_logic': emperor_require_logic,
            'emperor_style': style,
            'emperor_base_dependencies': emperor_base_dependencies,
            'emperor_classes': 'combined-plot-container'
        }

        return emperor_data
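# Example (hedged): a minimal sketch of rendering a standalone Empress page,
# assuming `bp_tree`, `feature_table` and `sample_md` (a BP tree, a
# sample-by-feature DataFrame and a sample metadata DataFrame) were prepared
# elsewhere; 'empress-plot' is a placeholder output directory.
def _example_empress_usage(bp_tree, feature_table, sample_md):
    viz = Empress(bp_tree, feature_table, sample_md,
                  filter_missing_features=True)
    out_dir = 'empress-plot'
    os.makedirs(out_dir, exist_ok=True)
    with open(os.path.join(out_dir, 'empress.html'), 'w') as fh:
        fh.write(viz.make_empress())
    # copy the JS/CSS support files (plus Emperor's, if an ordination was given)
    viz.copy_support_files(out_dir)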