def test_no_wells():
    """Layouts that ultimately define no wells should raise a ConfigError."""
    # `empty.toml` triggers the generic check.  The *_without_* layouts
    # actually trigger a different (and more specific) exception, but its
    # message still contains "No wells defined".
    layouts = [
            "empty.toml",
            "row_without_col.toml",
            "irow_without_col.toml",
            "col_without_row.toml",
            "icol_without_row.toml",
    ]
    for layout in layouts:
        with raises(ConfigError, match="No wells defined"):
            wellmap.load(DIR / layout)
def test_two_plates():
    """Load a layout spanning two plates, merged with per-plate data files."""
    df = wellmap.load(
            DIR / 'two_plates.toml',
            data_loader=pd.read_csv,
            merge_cols={'well': 'Well'},
    )

    # Both plates share the same well coordinates; only the path, plate name,
    # layout value (x), and data value differ.
    common = dict(
            well='A1', Well='A1', well0='A01',
            row='A', col='1',
            row_i=0, col_j=0,
    )
    assert row(df, 'plate == "a"') == dict(
            common,
            path=DIR / 'two_plates_a.csv',
            plate='a',
            x=1,
            Data=0,
    )
    assert row(df, 'plate == "b"') == dict(
            common,
            path=DIR / 'two_plates_b.csv',
            plate='b',
            x=2,
            Data=1,
    )
def run(self):
    """Render one or more example TOML layouts as a Sphinx directive.

    Each path given in the directive arguments is shown as a ``literalinclude``
    with a download link; unless the ``no-figure`` option is set, the last
    layout is also plotted (and the SVG regenerated if its dependencies are
    stale).  Returns a single container node wrapping the generated RST.
    """
    rel_paths = shlex.split(self.arguments[0])
    example_rst = ""

    if not self.content:
        # No inline content: every path must already exist on disk.
        contents = [None] * len(rel_paths)
    else:
        contents = '\n'.join(self.content).split('--EOF--')
        if len(contents) != len(rel_paths):
            # BUG FIX: the original formatted `len(content)`, but `content`
            # is only defined later (as the loop variable), so reporting this
            # error raised a NameError instead.
            raise self.error(
                    f"found {len(contents)} TOML snippets, but {len(rel_paths)} paths."
            )

    for rel_path, content in zip(rel_paths, contents):
        toml_path, toml_abs_path = self.env.relfn2path(rel_path)
        name = os.path.basename(toml_path)

        if content:
            # Write the inline snippet out, so the file on disk stays in sync
            # with the documentation.
            update_toml_file(toml_abs_path, content)
        if not os.path.exists(toml_abs_path):
            raise self.error(f"no such file: {toml_path}")

        example_rst += f'''\
.. literalinclude:: /{toml_path}
    :language: toml
    :caption: :download:`{name} </{toml_path}>`
'''

    # Only make a figure for the last snippet.
    if 'no-figure' not in self.options:
        svg_path = change_ext(toml_path, '.svg')
        svg_abs_path = change_ext(toml_abs_path, '.svg')
        df, deps = wellmap.load(toml_abs_path, report_dependencies=True)

        # Skip the (slow) plotting step if the SVG is already up-to-date.
        if any_deps_stale(svg_abs_path, deps):
            logger.info(f"[example] rendering: {svg_path}")
            attrs = self.options.get('attrs', [])
            cmap = wellmap.plot.get_colormap(
                    self.options.get('color', 'rainbow'))
            fig = wellmap.plot.plot_layout(df, attrs, cmap)
            fig.savefig(svg_abs_path, bbox_inches='tight')

        example_rst += f'''\
.. figure:: /{svg_path}
'''

    example_str_list = StringList(example_rst.splitlines())
    wrapper = nodes.container(classes=['wellmap-example'])
    self.state.nested_parse(example_str_list, 0, wrapper)

    return [wrapper]
def test_one_plate():
    """Check the labels produced by a minimal single-plate layout."""
    labels = wellmap.load(DIR / 'one_plate.toml')
    expected = dict(
            path=DIR / 'one_plate.csv',
            plate='a',
            well='A1', well0='A01',
            row='A', col='1',
            row_i=0, col_j=0,
            x=1,
    )
    assert row(labels, 'well == "A1"') == expected
def load(self):
    """Load the layout, filling in any optional columns that are absent."""
    df = wellmap.load(
            self.layout_toml,
            data_loader=load_cq,
            merge_cols=True,
            path_guess='{0.stem}',
    )

    # Fill in optional columns:
    for col in ('template', 'primers'):
        if col not in df:
            df[col] = np.nan

    return df, {}
def load(self):
    """Load the layout plus its `[qpcr]` extras, normalizing optional columns.

    Optional columns are created (or their missing values filled) so that
    downstream code can rely on them being present.  Returns the merged data
    frame and the extras dict.
    """
    df, extras = wellmap.load(
            self.layout_toml,
            data_loader=load_cq,
            merge_cols=True,
            path_guess='{0.stem}',
            extras=['qpcr'],
    )

    def fill_default(k, default=pd.NA):
        # Fill missing values in an existing column, or create the column
        # outright.  BUG FIX: the original hard-coded `fillna(pd.NA)`, so the
        # `default` parameter was silently ignored for existing columns; use
        # the parameter, which is identical for the current callers (all use
        # the default `pd.NA`) but correct for any non-NA default.
        df[k] = df[k].fillna(default) if k in df else default

    fill_default('control')
    fill_default('template')
    fill_default('primers')
    fill_default('date')

    # Treat a missing `control` value as "not a control".
    df['is_control'] = df['control'].fillna(False).astype(bool)

    return df, extras
def test_concat():
    """Concatenated layouts contribute wells, but shouldn't require paths."""
    # Each concatenated layout adds wells, but none provide a `meta.path`, so
    # requiring paths should fail with a helpful suggestion.
    for toml, n_wells in [('one_concat.toml', 2), ('two_concats_list.toml', 3)]:
        labels = wellmap.load(DIR / toml)
        assert len(labels) == n_wells

        with raises(ConfigError, match="Did you mean to set `meta.path`?"):
            wellmap.load(DIR / toml, path_required=True)

    # Should not raise. It's ok that `just_concat.csv` doesn't exist, because
    # `just_concat.toml` doesn't specify any wells.
    labels = wellmap.load(
            DIR / 'just_concat.toml',
            path_guess='{0.stem}.csv',
            path_required=True,
    )
    assert len(labels) == 1
def test_bad_args():
    """Invalid argument combinations should be rejected with ValueError."""
    # Doesn't make sense to specify `merge_cols` without `data_loader`:
    with raises(ValueError):
        wellmap.load(DIR / 'two_plates.toml', merge_cols={})

    # Non-existent merge columns, on either side of the mapping:
    for merge_cols in ({'xxx': 'Well'}, {'well': 'xxx'}):
        with raises(ValueError, match='xxx'):
            wellmap.load(
                    DIR / 'two_plates.toml',
                    data_loader=pd.read_csv,
                    merge_cols=merge_cols,
            )
import matplotlib.pyplot as plt import color_me import itertools from dbp.plate_reader import BiotekExperiment from scipy.stats import linregress def load_plate_reader(p): expt = BiotekExperiment(p) return expt.kinetic['450,521'] df = wellmap.load( toml_path='20201211_meaure_o42_o213.toml', data_loader=load_plate_reader, merge_cols=True, ) fig, ax = plt.subplots() x_fit = np.linspace(min(df['minutes']), max(df['minutes'])) iter_colors = lambda it: zip(itertools.cycle(color_me.ucsf.cycle), it) for color, ((beacon, dnazyme), g) in iter_colors(df.groupby(['beacon', 'dnazyme'])): x, y = g['minutes'], g['read'] m, b, r, p, err = linregress(x, y) y_fit = m * x_fit + b ax.plot(
def load_cq(path):
    """Read the Cq table exported alongside the given layout file."""
    results_path = path.parent / path.stem / 'Quantification Cq Results.csv'
    return pd.read_csv(results_path)[['Well', 'Cq']]

def format_label(slug):
    """Turn a 'low_high' temperature slug into a human-readable range."""
    low, high = slug.split('_')
    return f'{low}–{high}°C'

path = Path('20200803_optimize_ta.toml')
df = wellmap.load(
        path,
        data_loader=load_cq,
        merge_cols={'well0': 'Well'},
)

# Plot each plate's annealing-temperature gradient, normalized to the lowest
# Cq on that plate.
for slug, plate_df in df.groupby('plate'):
    plt.plot(
            plate_df['temperature_C'],
            plate_df['Cq'] - min(plate_df['Cq']),
            marker='+',
            linestyle='none',
            label=format_label(slug),
    )

plt.xlabel('Ta (°C)')
plt.ylabel('ΔCq')
plt.legend(loc='best')
if ylim: ax.set_ylim(*ylim) else: ax.set_ylim(0, ax.get_ylim()[1]) return fig, ax if __name__ == '__main__': args = docopt.docopt(__doc__) toml_path = Path(args['<toml>']) df, extras = wellmap.load( toml_path=toml_path, data_loader=load_plate_reader, merge_cols=True, path_guess='{0.stem}.xlsx', extras='style', ) if 'fit_start_min' not in df: df['fit_start_min'] = 0 if 'fit_stop_min' not in df: df['fit_stop_min'] = max(df['minutes']) df['control'] = df['control'].fillna('') style = Style.from_extras(df, extras) fits = calc_linear_fits(df) if args['--y-lim']: ylim = map(float, args['--y-lim'].split(',')) else:
from scipy.stats import linregress

def load_cq(path):
    """Melt a plate-shaped CSV of Cq values into tidy (row, col, Cq) records."""
    wide = pd.read_csv(path).rename(columns={'Cq': 'row'})
    return wide.melt(
            id_vars=['row'],
            var_name='col',
            value_name='Cq',
    )

df = wellmap.load(
        'std_curve.toml',
        data_loader=load_cq,
        merge_cols=True,
        path_guess='{0.stem}.csv',
)

# Fit a line to Cq vs. log10(dilution), i.e. the standard curve.
x, y = df['dilution'], df['Cq']
m, b, r, p, err = linregress(np.log10(x), y)

x_fit = np.logspace(0, 5)
y_fit = np.polyval((m, b), np.log10(x_fit))

# Summarize the fit quality and the amplification efficiency implied by the
# slope of the standard curve.
r2 = r**2
eff = 100 * (10**(1 / m) - 1)
label = 'R²={:.5f}\neff={:.2f}%'.format(r2, eff)

plt.plot(x_fit, y_fit, '--', label=label)
#!/usr/bin/env python3

"""Minimal wellmap example: load a layout and print the resulting data frame."""

import wellmap

df = wellmap.load('simple_1.toml')
print(df)
def test_reasonably_complex():
    """Sanity-check the well count for a layout combining many features."""
    expected_wells = 32
    df = wellmap.load(DIR / 'reasonably_complex.toml')
    assert len(df) == expected_wells
def test_one_well():
    """Exercise the path/data-loading options on a one-well layout."""
    base = dict(
            well='A1', well0='A01',
            row='A', col='1',
            row_i=0, col_j=0,
            x=1, y=1,
    )
    with_path = dict(base, path=DIR / 'one_well_xy.csv')
    merged = dict(with_path, Well='A1', Data='xy')

    # Labels only, with no path information:
    labels = wellmap.load(DIR / 'one_well_xy.toml')
    assert row(labels, 'well == "A1"') == base

    # Labels with a guessed path:
    labels = wellmap.load(DIR / 'one_well_xy.toml', path_guess='{0.stem}.csv')
    assert row(labels, 'well == "A1"') == with_path

    # The layout file itself is the only dependency:
    _, deps = wellmap.load(DIR / 'one_well_xy.toml', report_dependencies=True)
    assert deps == {DIR / 'one_well_xy.toml'}

    # Requiring a path—explicitly or implicitly via a data loader—fails when
    # none can be found:
    with raises(ConfigError, match='one_well_xy.toml'):
        wellmap.load(DIR / 'one_well_xy.toml', path_required=True)
    with raises(ConfigError, match='one_well_xy.toml'):
        wellmap.load(DIR / 'one_well_xy.toml', data_loader=pd.read_csv)

    # Load labels and data side-by-side, without merging:
    labels, data = wellmap.load(
            DIR / 'one_well_xy.toml',
            data_loader=pd.read_csv,
            path_guess='{0.stem}.csv',
    )
    assert row(labels, 'well == "A1"') == with_path
    assert row(data, 'Well == "A1"') == dict(
            Well='A1',
            path=DIR / 'one_well_xy.csv',
            Data='xy',
    )

    # Merge on explicitly-named columns:
    df = wellmap.load(
            DIR / 'one_well_xy.toml',
            data_loader=pd.read_csv,
            merge_cols={'well': 'Well'},
            path_guess='{0.stem}.csv',
    )
    assert row(df, 'well == "A1"') == merged

    # Merge with `merge_cols=True`:
    df = wellmap.load(
            DIR / 'one_well_xy.toml',
            data_loader=read_csv_and_rename,
            merge_cols=True,
            path_guess='{0.stem}.csv',
    )
    assert row(df, 'well == "A1"') == merged
def test_one_well_with_extras():
    """Check that extras are returned (and forwarded) by every load() variant."""
    expected_extras = {'extras': {'a': 1, 'b': 1}}

    def data_loader(path, extras):
        # The extras should also be passed through to the data loader.
        assert extras == expected_extras
        return pd.read_csv(path)

    base = dict(
            well='A1', well0='A01',
            row='A', col='1',
            row_i=0, col_j=0,
            x=1, y=1,
    )
    with_path = dict(base, path=DIR / 'one_well_xy_extras.csv')
    merged = dict(with_path, Well='A1', Data='xy')

    # No data:
    labels, extras = wellmap.load(
            DIR / 'one_well_xy_extras.toml',
            extras=True,
    )
    assert row(labels, 'well == "A1"') == base
    assert extras == expected_extras

    # No data, with extras present but not requested:
    labels = wellmap.load(DIR / 'one_well_xy_extras.toml')
    assert row(labels, 'well == "A1"') == base

    # No data, with extras and dependencies requested:
    labels, extras, deps = wellmap.load(
            DIR / 'one_well_xy_extras.toml',
            extras=True,
            report_dependencies=True,
    )
    assert row(labels, 'well == "A1"') == base
    assert extras == expected_extras
    assert deps == {
            DIR / 'one_well_xy_extras.toml',
    }

    # Load labels and data, but don't merge:
    labels, data, extras = wellmap.load(
            DIR / 'one_well_xy_extras.toml',
            data_loader=data_loader,
            path_guess='{0.stem}.csv',
            extras=True,
    )
    assert row(labels, 'well == "A1"') == with_path
    assert row(data, 'Well == "A1"') == dict(
            Well='A1',
            path=DIR / 'one_well_xy_extras.csv',
            Data='xy',
    )
    assert extras == expected_extras

    # Merge on explicitly-named columns:
    df, extras = wellmap.load(
            DIR / 'one_well_xy_extras.toml',
            data_loader=data_loader,
            merge_cols={'well': 'Well'},
            path_guess='{0.stem}.csv',
            extras=True,
    )
    assert row(df, 'well == "A1"') == merged
    assert extras == expected_extras

    # Automatic merge:
    df, extras = wellmap.load(
            DIR / 'one_well_xy_extras.toml',
            data_loader=read_csv_and_rename,
            merge_cols=True,
            path_guess='{0.stem}.csv',
            extras=True,
    )
    assert row(df, 'well == "A1"') == merged
    assert extras == expected_extras