def freeze_production_ratios(self, ans):
    """Freeze the current production ratios for each analysis in *ans*.

    Analyses are grouped by irradiation and level so each level's production
    object is fetched from the meta repository only once, then dumped into a
    per-analysis ``productions`` file and committed.
    """
    self.info('freeze production ratios')

    frozen = []

    def produce():
        by_irrad = lambda a: a.irradiation
        by_level = lambda a: a.level
        for irrad, level_group in groupby(sorted(ans, key=by_irrad), key=by_irrad):
            for level, members in groupby(sorted(level_group, key=by_level), key=by_level):
                production = self.meta_repo.get_production(irrad, level)
                for analysis in members:
                    yield production, analysis

    def freeze_one(item, prog, i, n):
        production, analysis = item
        if prog:
            prog.change_message('Freezing Production {}'.format(analysis.runid))
        dest = analysis_path(analysis.runid, analysis.repository_identifier,
                             'productions', mode='w')
        production.dump(path=dest)
        frozen.append((analysis.repository_identifier, dest))

    progress_loader(produce(), freeze_one, threshold=1)
    self._commit_freeze(frozen, '<PR_FREEZE>')
def _import_reduced(self, unks):
    """Transfer reduced values (intercepts, baselines, blanks, IC factors,
    comment and tag) from the mass spec database onto each unknown and dump
    the results, recording every modified path in ``self._paths``.
    """
    def func(unk, prog, i, n):
        # BUG FIX: progress_loader may pass prog=None (every sibling callback
        # guards with `if prog:`); the original dereferenced it unconditionally.
        if prog:
            prog.change_message('Transfering {} {}/{}'.format(unk.record_id, i, n))

        ms_unk = self.recaller.find_analysis(unk.identifier, unk.aliquot, unk.step)

        keys = []
        fkeys = []
        detkeys = []
        for k, iso in unk.isotopes.items():
            miso = ms_unk.isotopes[k]
            iso.set_uvalue((miso.value, miso.error))

            det = iso.detector
            # fits are keyed by both detector and isotope name
            fkeys.append(det)
            fkeys.append(k)

            iso.baseline.set_uvalue((miso.baseline.value, miso.baseline.error))
            unk.set_temporary_blank(k, miso.blank.value, miso.blank.error,
                                    'mass_spec_reduced')
            unk.set_temporary_uic_factor(det, miso.ic_factor)

            detkeys.append(det)
            keys.append(k)

        unk.dump_fits(fkeys)
        self._paths.append(unk.intercepts_path)
        self._paths.append(unk.baselines_path)

        unk.dump_blanks(keys)
        self._paths.append(unk.blanks_path)

        icfits = ['mass_spec_reduced' for _ in detkeys]
        unk.dump_icfactors(detkeys, icfits)
        self._paths.append(unk.ic_factors_path)

        meta = unk.get_meta()
        meta['comment'] = ms_unk.comment
        unk.dump_meta(meta)
        self._paths.append(unk.meta_path)

        # update the tag
        unk.set_tag({'name': ms_unk.tag, 'note': ''})
        path = self.dvc.update_tag(unk, add=False)
        self._paths.append(path)
        self.dvc.set_analysis_tag(unk, ms_unk.tag)

    progress_loader(unks, func)
def make_analyses(self, records, calculate_f_only=False):
    """Construct full analysis objects for *records*, syncing each referenced
    repository first, and log the total/average construction time."""
    if not records:
        return

    globalv.active_analyses = records

    # ensure every referenced repository is synced before construction
    for repo_id in {r.repository_identifier for r in records}:
        self.sync_repo(repo_id)

    start = time.time()

    def build(*args):
        return self._make_record(calculate_f_only=calculate_f_only, *args)

    result = progress_loader(records, build, threshold=1, step=25)

    elapsed = time.time() - start
    count = len(records)
    self.debug('Make analysis time, total: {}, n: {}, average: {}'.format(
        elapsed, count, elapsed / float(count)))
    return result
def run(self, state):
    """Assemble iso-evolution fit results for the state's unknowns and open a
    results editor, inserting a blank separator row between isotope groups."""
    super(FitIsotopeEvolutionNode, self).run(state)

    opts = self.plotter_options
    self._fits = list(opts.get_loadable_aux_plots())

    results = progress_loader(state.unknowns, self._assemble_result, threshold=1)

    if self.editor:
        self.editor.analysis_groups = [(ai,) for ai in state.unknowns]

    for ai in state.unknowns:
        ai.graph_id = 0

    self._set_saveable(state)

    if results:
        by_iso = lambda an: an.isotope
        results = sort_isotopes(results, key=by_iso)
        # separate isotope groups with an empty result row; drop the
        # trailing separator
        separated = []
        for _, grp in groupby(results, key=by_iso):
            separated.extend(grp)
            separated.append(IsoEvoResult())
        results = separated[:-1]

        editor = IsoEvolutionResultsEditor(results)
        state.editors.append(editor)
def make_analyses(self, records, calculate_f_only=False):
    """Construct full analysis objects for *records*, tracking which
    experiment repositories have already been pulled."""
    if not records:
        return

    # load repositories
    exps = {r.experiment_identifier for r in records}
    if self.pulled_experiments:
        exps = exps - self.pulled_experiments
        # BUG FIX: set.union returns a NEW set; the original discarded the
        # result, so pulled_experiments never grew and repositories were
        # treated as un-pulled on every call. Accumulate in place instead.
        self.pulled_experiments.update(exps)
    else:
        self.pulled_experiments = exps

    st = time.time()
    wrapper = lambda *args: self._make_record(calculate_f_only=calculate_f_only, *args)
    ret = progress_loader(records, wrapper, threshold=1)

    et = time.time() - st
    n = len(records)
    self.debug('Make analysis time, total: {}, n: {}, average: {}'.format(et, n, et / float(n)))
    return ret
def make_analyses(self, records, calculate_f_only=False):
    """Construct full analysis objects for *records*, syncing repositories
    (with progress feedback) before building, and log timing stats."""
    if not records:
        return

    globalv.active_analyses = records

    start = time.time()

    def sync(repo_id, prog, i, n):
        if prog:
            prog.change_message('Syncing repository= {}'.format(repo_id))
        self.sync_repo(repo_id, use_progress=False)

    repo_ids = {r.repository_identifier for r in records}
    progress_iterator(repo_ids, sync, threshold=1)

    def build(*args):
        return self._make_record(calculate_f_only=calculate_f_only, *args)

    result = progress_loader(records, build, threshold=1, step=25)

    elapsed = time.time() - start
    count = len(records)
    self.debug('Make analysis time, total: {}, n: {}, average: {}'.format(
        elapsed, count, elapsed / float(count)))
    return result
def run(self, state):
    """Assemble iso-evolution fit results for the state's unknowns and open a
    results editor, separating isotope groups with a blank row."""
    super(FitIsotopeEvolutionNode, self).run(state)

    opts = self.plotter_options
    self._fits = list(opts.get_loadable_aux_plots())

    results = progress_loader(state.unknowns, self._assemble_result, threshold=1)

    if self.editor:
        self.editor.analysis_groups = [(ai,) for ai in state.unknowns]

    for ai in state.unknowns:
        ai.graph_id = 0

    self._set_saveable(state)

    if results:
        by_iso = lambda an: an.isotope
        results = sort_isotopes(results, key=by_iso)
        # interleave a blank IsoEvoResult between isotope groups and drop
        # the trailing one
        separated = []
        for _, grp in groupby(results, key=by_iso):
            separated.extend(grp)
            separated.append(IsoEvoResult())
        results = separated[:-1]

        editor = IsoEvolutionResultsEditor(results)
        state.editors.append(editor)
def _construct_analyses(self, no_db_ans, db_ans, progress, calculate_age,
                        calculate_F, unpack, use_cache, **kw):
    """Construct analysis objects for records not yet in the cache.

    Fetches all db records with a single UUID query, walks them in lockstep
    with *no_db_ans* via a groupby iterator, and returns
    ``(db_ans, constructed)``. Returns ``([], [])`` if the user cancels.
    """
    uuids = [ri.uuid for ri in no_db_ans]

    # get all dbrecords with one call
    ms = self.db.get_analyses_uuid(uuids)

    construct = self._construct_analysis
    add_to_cache = self._add_to_cache

    key = lambda x: x[0]
    dbrecords = groupby(ms, key=key)

    def func(x, prog, i, n):
        # BUG FIX: iterator.next() is Python-2-only; next() works on 2.6+/3.
        # Relies on ms being ordered the same as no_db_ans.
        _, gi = next(dbrecords)
        self.debug('constructing {}/{} {} {}'.format(i + 1, n, x.record_id, x.uuid))
        a = construct(x, gi, prog,
                      unpack=unpack,
                      calculate_age=calculate_age,
                      calculate_F=calculate_F, **kw)
        if use_cache:
            add_to_cache(a)
        return a

    try:
        return db_ans, progress_loader(no_db_ans, func,
                                       progress=progress,
                                       reraise_cancel=True)
    except CancelLoadingError:
        return [], []
def _make_records(self, ans):
    """Wrap each analysis in an IsotopeRecordView, with progress feedback."""
    def wrap(xi, prog, i, n):
        if prog:
            prog.change_message('Loading {}'.format(xi.record_id))
        return IsotopeRecordView(xi)

    return progress_loader(ans, wrap, threshold=25)
def run(self, state):
    """Fit isotope evolutions for the valid unknowns and open a results
    editor, unless a refit check vetoes the run."""
    super(FitIsotopeEvolutionNode, self).run(state)

    opts = self.plotter_options
    self._fits = list(reversed([pi for pi in opts.get_saveable_aux_plots()]))
    self._keys = [fi.name for fi in self._fits]

    unks = self._get_valid_unknowns(state.unknowns)
    if not unks:
        return

    if self.check_refit(unks):
        return

    results = progress_loader(unks, self._assemble_result, threshold=1, step=10)
    if self.editor:
        self.editor.analysis_groups = [(ai,) for ai in unks]

    self._set_saveable(state)
    if results:
        state.editors.append(IsoEvolutionResultsEditor(results))
def _make_records(self, ans):
    """Return the records unchanged, reporting load progress as we go."""
    def passthrough(xi, prog, i, n):
        if prog:
            prog.change_message('Loading {}'.format(xi.record_id))
        return xi

    return progress_loader(ans, passthrough, threshold=25)
def _load_sample_record_views(self, lns):
    """Build a LabnumberRecordView for each labnumber, with progress."""
    def to_view(li, prog, i, n):
        if prog:
            prog.change_message('Loading Labnumber {}'.format(li.identifier))
        return LabnumberRecordView(li)

    return progress_loader(lns, to_view, step=25)
def _make_records(self, ans):
    """Return the records unchanged, reporting load progress as we go."""
    def passthrough(xi, prog, i, n):
        if prog:
            prog.change_message('Loading {}'.format(xi.record_id))
        return xi

    return progress_loader(ans, passthrough, threshold=25, step=25)
def _load_sample_record_views(self, lns):
    """Build a LabnumberRecordView for each labnumber, with progress."""
    def to_view(li, prog, i, n):
        if prog:
            prog.change_message('Loading Labnumber {}'.format(li.identifier))
        return LabnumberRecordView(li)

    return progress_loader(lns, to_view, step=25)
def permutate(self, ai):
    """Run the analysis through every unique permutation of its isotopes and
    return the resulting records."""
    def run_one(x, prog, i, n):
        return self._permutate(ai, x, prog, i, n)

    perms = self._gen_unique_permutations(ai.isotopes)
    return progress_loader(perms, run_one)
def make_interpreted_ages(self, ias):
    """Load each interpreted-age JSON file into a DVCInterpretedAge."""
    def build(x, prog, i, n):
        if prog:
            prog.change_message('Making Interpreted age {}'.format(x.name))
        ia = DVCInterpretedAge()
        ia.from_json(dvc_load(x.path))
        return ia

    return progress_loader(ias, build, step=25)
def freeze_flux(self, ans):
    """Freeze the current flux values for each analysis in *ans* into
    per-repository ``flux/<irradiation>/<level>`` files and commit them.
    """
    self.info('freeze flux')

    def ai_gen():
        key = lambda x: x.irradiation
        lkey = lambda x: x.level
        rkey = lambda x: x.repository_identifier
        for irrad, ais in groupby(sorted(ans, key=key), key=key):
            for level, ais in groupby(sorted(ais, key=lkey), key=lkey):
                p = self.get_level_path(irrad, level)
                obj = dvc_load(p)
                for repo, ais in groupby(sorted(ais, key=rkey), key=rkey):
                    yield repo, irrad, level, {ai.irradiation_position: obj[ai.irradiation_position]
                                               for ai in ais}

    added = []

    def func(x, prog, i, n):
        repo, irrad, level, d = x
        if prog:
            prog.change_message('Freezing Flux {}{} Repository={}'.format(irrad, level, repo))

        root = os.path.join(paths.repository_dataset_dir, repo, 'flux', irrad)
        r_mkdir(root)

        p = os.path.join(root, level)
        if os.path.isfile(p):
            # BUG FIX: merge the new positions into the existing file and
            # write the MERGED dict. The original dumped the un-merged `d`
            # after updating `dd`, discarding positions already frozen.
            dd = dvc_load(p)
            dd.update(d)
            d = dd

        dvc_dump(d, p)
        added.append((repo, p))

    progress_loader(ai_gen(), func, threshold=1)
    self._commit_freeze(added, '<FLUX_FREEZE>')
def _load_records(self, records):
    """Convert *records* to record views and extend ``self.records``.

    Using a IsotopeRecordView is significantly faster than loading a
    IsotopeRecord directly.
    """
    if not records:
        return

    def to_view(x, prog, i, n):
        if prog:
            prog.change_message('Loading {}/{} {}'.format(i + 1, n, x.record_id))
        return self._record_view_factory(x)

    self.records.extend(progress_loader(records, to_view))
def permutate(self, ai):
    """Permutate the analysis over randomly sampled CDD IC factors."""
    record_id = ai.record_id

    # NOTE(review): the measured CDD IC factor is fetched and then
    # immediately overridden with hard-coded values — looks like leftover
    # debug code; confirm before relying on these results.
    icf = ai.get_ic_factor('CDD')
    e = std_dev(icf)
    icf = 1.001
    e = 0.1

    # draw 20 IC factors from a normal distribution around the (overridden) value
    perms = norm.rvs(loc=nominal_value(icf), scale=e, size=20)

    iso36 = ai.isotopes['Ar36']
    iso36.detector = 'CDD'

    def run_one(x, prog, i, n):
        return self._permutate(ai, record_id, e, x, prog, i, n)

    return progress_loader(perms, run_one)
def permutate(self, ai):
    """Permutate the analysis over randomly sampled CDD IC factors."""
    record_id = ai.record_id

    # NOTE(review): the measured CDD IC factor is fetched and then
    # immediately overridden with hard-coded values — looks like leftover
    # debug code; confirm before relying on these results.
    icf = ai.get_ic_factor("CDD")
    e = std_dev(icf)
    icf = 1.001
    e = 0.1

    # draw 20 IC factors from a normal distribution around the (overridden) value
    perms = norm.rvs(loc=nominal_value(icf), scale=e, size=20)

    iso36 = ai.isotopes["Ar36"]
    iso36.detector = "CDD"

    def run_one(x, prog, i, n):
        return self._permutate(ai, record_id, e, x, prog, i, n)

    return progress_loader(perms, run_one)
def run(self, state):
    """Assemble define-equilibration results and open a results editor."""
    super(DefineEquilibrationNode, self).run(state)

    opts = self.plotter_options
    self._fits = list(reversed([pi for pi in opts.get_saveable_aux_plots()]))
    self._keys = [fi.name for fi in self._fits]

    results = progress_loader(state.unknowns, self._assemble_result,
                              threshold=1, step=10)
    self._set_saveable(state)
    if results:
        state.editors.append(DefineEquilibrationResultsEditor(results, options=opts))
def run(self, state):
    """Assemble define-equilibration results and open a results editor."""
    super(DefineEquilibrationNode, self).run(state)

    opts = self.plotter_options
    self._fits = list(reversed([pi for pi in opts.get_saveable_aux_plots()]))
    self._keys = [fi.name for fi in self._fits]

    results = progress_loader(state.unknowns, self._assemble_result,
                              threshold=1, step=10)
    self._set_saveable(state)
    if results:
        state.editors.append(DefineEquilibrationResultsEditor(results))
def run(self, state):
    """Persist equilibration edits for the saveable keys and record which
    projects were modified on the pipeline state."""
    if not state.saveable_keys:
        return

    def wrapper(x, prog, i, n):
        return self._save_eq(x, prog, i, n, state.saveable_keys)

    msg = ','.join('{}({})'.format(*a)
                   for a in zip(state.saveable_keys, state.saveable_fits))
    items = progress_loader(state.unknowns, wrapper, threshold=1, unpack=False)

    modified = self.dvc.update_analysis_paths(items, '<DEFINE EQUIL> {}'.format(msg))
    modified.extend(self.dvc.update_analyses(state.unknowns, 'intercepts',
                                             '<ISOEVO> modified by DEFINE EQUIL'))
    if modified:
        state.modified = True
        state.modified_projects = state.modified_projects.union(modified)
def _load_associated_samples(self, names):
    """Load sample record views for *names*; a sample with labnumbers yields
    one view per labnumber."""
    db = self.manager.db
    sams = db.get_samples(names)
    self.debug('get samples n={}'.format(len(sams)))

    def to_view(li, prog, i, n):
        if prog:
            prog.change_message('Loading Sample {}'.format(li.name))
        if li.labnumbers:
            return [SampleRecordView(li, identifier=ll.identifier)
                    for ll in li.labnumbers]
        return SampleRecordView(li)

    views = progress_loader(sams, to_view)
    self.samples = views
    self.osamples = views
def _retrieve_labnumbers(self):
    """Query labnumbers via the subclass hook and return them as views."""
    db = self.db
    # dont query if analysis_types enabled but not analysis type specified
    if self.use_analysis_type_filtering and not self.analysis_include_types:
        self.warning_dialog('Specify Analysis Types or disable Analysis Type Filtering')
        return []

    def to_view(li, prog, i, n):
        if prog:
            prog.change_message('Loading Labnumber {}'.format(li.identifier))
        return LabnumberRecordView(li)

    with db.session_ctx():
        ls = self._retrieve_labnumbers_hook(db)
        self.debug('_retrieve_labnumbers n={}'.format(len(ls)))
        return progress_loader(ls, to_view)
def _populate_samples(self, lns=None):
    """Populate the sample list from *lns* (or the current identifier),
    pre-selecting the single result when exactly one labnumber loads."""
    db = self.db

    def to_view(li, prog, i, n):
        if prog:
            prog.change_message('Loading Labnumber {}'.format(li.identifier))
        return LabnumberRecordView(li)

    with db.session_ctx():
        if not lns:
            lns = [db.get_labnumber(self.identifier)]

        n = len(lns)
        self.debug('_populate_samples n={}'.format(n))

        sams = progress_loader(lns, to_view)
        sel = sams[:1] if n == 1 and sams else []
        self.set_samples(sams, sel)
def _retrieve_labnumbers(self):
    """Query labnumbers via the subclass hook and return them as views."""
    db = self.db
    # dont query if analysis_types enabled but not analysis type specified
    if self.use_analysis_type_filtering and not self.analysis_include_types:
        self.warning_dialog('Specify Analysis Types or disable Analysis Type Filtering')
        return []

    def to_view(li, prog, i, n):
        if prog:
            prog.change_message('Loading Labnumber {}'.format(li.identifier))
        return LabnumberRecordView(li)

    with db.session_ctx():
        ls = self._retrieve_labnumbers_hook(db)
        self.debug('_retrieve_labnumbers n={}'.format(len(ls)))
        return progress_loader(ls, to_view)
def _populate_samples(self, lns=None):
    """Populate the sample list from *lns* (or the current identifier),
    pre-selecting the single result when exactly one labnumber loads."""
    db = self.db

    def to_view(li, prog, i, n):
        if prog:
            prog.change_message('Loading Labnumber {}'.format(li.identifier))
        return LabnumberRecordView(li)

    with db.session_ctx():
        if not lns:
            lns = [db.get_labnumber(self.identifier)]

        n = len(lns)
        self.debug('_populate_samples n={}'.format(n))

        sams = progress_loader(lns, to_view)
        sel = sams[:1] if n == 1 and sams else []
        self.set_samples(sams, sel)
def _make_records(self, ans):
    """Bind each analysis and return the list, timing the whole pass."""
    count = len(ans)
    self.debug('make records {}'.format(count))

    import time
    start = time.time()

    def bind(xi, prog, i, n):
        if prog:
            if i == 0:
                prog.change_message('Loading')
            elif i == n - 1:
                prog.change_message('Finished')
            # periodic per-record message (overwrites the ones above)
            if i % 25 == 0:
                prog.change_message('Loading {}'.format(xi.record_id))
        xi.bind()
        return xi

    ret = progress_loader(ans, bind, threshold=100, step=20)
    self.debug('make records {}'.format(time.time() - start))
    return ret
def _load_associated_samples(self, names):
    """Load sample record views for *names*; a sample with labnumbers yields
    one view per labnumber."""
    db = self.manager.db
    sams = db.get_samples(names)
    self.debug('get samples n={}'.format(len(sams)))

    def to_view(li, prog, i, n):
        if prog:
            prog.change_message('Loading Sample {}'.format(li.name))
        if li.labnumbers:
            return [SampleRecordView(li, identifier=ll.identifier)
                    for ll in li.labnumbers]
        return SampleRecordView(li)

    views = progress_loader(sams, to_view)
    self.samples = views
    self.osamples = views
def run(self, state):
    """Persist equilibration edits for the saveable keys and record which
    projects were modified on the pipeline state."""
    if not state.saveable_keys:
        return

    def wrapper(x, prog, i, n):
        return self._save_eq(x, prog, i, n, state.saveable_keys)

    msg = ','.join('{}({})'.format(*a)
                   for a in zip(state.saveable_keys, state.saveable_fits))
    items = progress_loader(state.unknowns, wrapper, threshold=1, unpack=False)

    modified = self.dvc.update_analysis_paths(items, '<DEFINE EQUIL> {}'.format(msg))
    modified.extend(self.dvc.update_analyses(state.unknowns, 'intercepts',
                                             '<ISOEVO> modified by DEFINE EQUIL'))
    if modified:
        state.modified = True
        state.modified_projects = state.modified_projects.union(modified)
def run(self, state):
    """Fit isotope evolutions for the valid unknowns and open a results
    editor, unless a refit check vetoes the run."""
    super(FitIsotopeEvolutionNode, self).run(state)

    opts = self.plotter_options
    self._fits = list(reversed([pi for pi in opts.get_saveable_aux_plots()]))
    self._keys = [fi.name for fi in self._fits]

    unks = self._get_valid_unknowns(state.unknowns)
    if not unks:
        return

    if self.check_refit(unks):
        return

    results = progress_loader(unks, self._assemble_result, threshold=1, step=10)
    if self.editor:
        self.editor.analysis_groups = [(ai,) for ai in unks]

    self._set_saveable(state)
    if results:
        state.editors.append(IsoEvolutionResultsEditor(results))
def _graphical_filter_button_fired(self):
    # Launch the graphical (time-series) analysis filter for the selected
    # samples, or for all loaded samples when none are selected.
    # print 'ffffassdf'
    self.debug('doing graphical filter')
    from pychron.processing.tasks.browser.graphical_filter import GraphicalFilterModel, GraphicalFilterView
    sams = self.selected_samples
    if not sams:
        sams = self.samples
    db = self.db
    with db.session_ctx():
        if sams:
            # bound the date range by the selected labnumbers' analyses
            lns = [si.identifier for si in sams]
            lpost, hpost = db.get_min_max_analysis_timestamp(lns)
            ams = ms = db.get_analysis_mass_spectrometers(lns)
            force = False
        else:
            # no samples selected: fall back to the recent-hours window and
            # force the user to fine tune the range below
            force = True
            lpost = datetime.now() - timedelta(
                hours=self.search_criteria.recent_hours)
            hpost = datetime.now()
            ams = [mi.name for mi in db.get_mass_spectrometers()]
            ms = ams[:1]
        # if date range > X days make user fine tune range
        tdays = 3600 * 24 * max(
            1, self.search_criteria.graphical_filtering_max_days)
        if force or (hpost - lpost).total_seconds() > tdays or len(ms) > 1:
            d = GraphicalFilterSelector(lpost=lpost, hpost=hpost,
                                        available_mass_spectrometers=ams,
                                        mass_spectrometers=ms)
            info = d.edit_traits(kind='livemodal')
            if info.result:
                lpost, hpost, ms = d.lpost, d.hpost, d.mass_spectrometers
                if not ms:
                    self.warning_dialog(
                        'Please select at least one Mass Spectrometer')
                    return
            else:
                # user cancelled the range selector
                return
        ans = db.get_analyses_date_range(lpost, hpost, order='asc',
                                         mass_spectrometers=ms)
        # ans = db.get_date_range_analyses(lpost, hpost,
        #                                  ordering='asc',
        #                                  spectrometer=ms)

        def func(xi, prog, i, n):
            if prog:
                prog.change_message('Loading {}-{}. {}'.format(
                    i, n, xi.record_id))
            return GraphicalRecordView(xi)

        ans = progress_loader(ans, func)
        if not ans:
            return
        gm = GraphicalFilterModel(
            analyses=ans,
            projects=[p.name for p in self.selected_projects])
        gm.setup()
        gv = GraphicalFilterView(model=gm)
        info = gv.edit_traits(kind='livemodal')
        if info.result:
            # push the user's selection into the analysis table
            ans = gm.get_selection()
            self.analysis_table.analyses = ans
            self._graphical_filter_hook(ans, gm.is_append)
def _graphical_filter_button_fired(self):
    # Launch the graphical (time-series) analysis filter for the selected
    # samples, or for all loaded samples when none are selected.
    # print 'ffffassdf'
    self.debug('doing graphical filter')
    from pychron.processing.tasks.browser.graphical_filter import GraphicalFilterModel, GraphicalFilterView
    sams = self.selected_samples
    if not sams:
        sams = self.samples
    db = self.db
    with db.session_ctx():
        if sams:
            # bound the date range by the selected labnumbers' analyses
            lns = [si.identifier for si in sams]
            lpost, hpost = db.get_min_max_analysis_timestamp(lns)
            ams = ms = db.get_analysis_mass_spectrometers(lns)
            force = False
        else:
            # no samples selected: fall back to the recent-hours window and
            # force the user to fine tune the range below
            force = True
            lpost = datetime.now() - timedelta(hours=self.search_criteria.recent_hours)
            hpost = datetime.now()
            ams = [mi.name for mi in db.get_mass_spectrometers()]
            ms = ams[:1]
        # if date range > X days make user fine tune range
        tdays = 3600 * 24 * max(1, self.search_criteria.graphical_filtering_max_days)
        if force or (hpost - lpost).total_seconds() > tdays or len(ms) > 1:
            d = GraphicalFilterSelector(lpost=lpost, hpost=hpost,
                                        available_mass_spectrometers=ams,
                                        mass_spectrometers=ms)
            info = d.edit_traits(kind='livemodal')
            if info.result:
                lpost, hpost, ms = d.lpost, d.hpost, d.mass_spectrometers
                if not ms:
                    self.warning_dialog('Please select at least one Mass Spectrometer')
                    return
            else:
                # user cancelled the range selector
                return
        ans = db.get_analyses_date_range(lpost, hpost, order='asc', mass_spectrometers=ms)
        # ans = db.get_date_range_analyses(lpost, hpost,
        #                                  ordering='asc',
        #                                  spectrometer=ms)

        def func(xi, prog, i, n):
            if prog:
                prog.change_message('Loading {}-{}. {}'.format(i, n, xi.record_id))
            return GraphicalRecordView(xi)

        ans = progress_loader(ans, func)
        if not ans:
            return
        gm = GraphicalFilterModel(analyses=ans,
                                  projects=[p.name for p in self.selected_projects])
        gm.setup()
        gv = GraphicalFilterView(model=gm)
        info = gv.edit_traits(kind='livemodal')
        if info.result:
            # push the user's selection into the analysis table
            ans = gm.get_selection()
            self.analysis_table.analyses = ans
            self._graphical_filter_hook(ans, gm.is_append)
class ExperimentRepoTask(BaseTask):
    """Task for browsing experiment (DVC) repositories: listing local and
    organization repos, checking branches, and detecting unpushed changes.
    """
    name = 'Experiment Repositories'

    # currently highlighted remote/local repository
    selected_repository_name = Str
    selected_local_repository_name = Instance(RepoItem)
    repository_names = List

    # GitHub organization and token, bound from preferences in activated()
    organization = Str
    oauth_token = Str

    local_names = List
    tool_bars = [SToolBar(CloneAction(),
                          AddBranchAction(),
                          CheckoutBranchAction(),
                          PushAction(),
                          PullAction(),
                          FindChangesAction())]

    commits = List
    _repo = None
    selected_commit = Any
    branch = Str
    branches = List

    def activated(self):
        """Bind preferences, refresh repo lists, optionally scan for changes."""
        self._preference_binder('pychron.dvc', ('organization',))
        self._preference_binder('pychron.github', ('oauth_token',))

        org = Organization(self.organization)
        org._oauth_token = self.oauth_token

        self.refresh_local_names()
        if self.confirmation_dialog('Check all Repositories for changes'):
            self.find_changes()

        self.repository_names = org.repo_names

    def refresh_local_names(self):
        """Rebuild the local repository list, sorted by name."""
        self.local_names = [RepoItem(name=i) for i in sorted(self.list_repos())]

    def find_changes(self, remote='origin', branch='master'):
        """Mark local repositories that have commits not pushed to
        ``remote/branch``, then sort dirty repos to the top."""
        self.debug('find changes')

        def func(item, prog, i, n):
            name = item.name
            if prog:
                prog.change_message('Examining: {}({}/{})'.format(name, i, n))
            self.debug('examining {}'.format(name))
            r = Repo(os.path.join(paths.repository_dataset_dir, name))
            try:
                r.git.fetch()
                line = r.git.log('{}/{}..HEAD'.format(remote, branch), '--oneline')
                item.dirty = bool(line)
            # BUG FIX: "except GitCommandError, e:" is Python-2-only syntax;
            # the "as" form works on Python 2.6+ and 3.
            except GitCommandError as e:
                self.warning('error examining {}. {}'.format(name, e))

        if self.selected_local_repository_name:
            names = (self.selected_local_repository_name,)
        else:
            names = self.local_names

        progress_loader(names, func)

        self.local_names = sorted(self.local_names,
                                  key=lambda k: k.dirty, reverse=True)