Example No. 1
    def _make_level(self, doc, irrad, level, ids, ans):
        root = doc['root']
        options = doc['options']

        lroot = os.path.join(root, irrad, level)
        r_mkdir(lroot)

        n = len(ans)
        prog = self.open_progress(n, close_at_end=False)

        ans = self.make_analyses(ans, progress=prog, use_cache=False)
        #group by stepheat vs fusion
        pred = lambda x: bool(x.step)

        ans = sorted(ans, key=pred)
        stepheat, fusion = map(list, partition(ans, pred))

        # apred = lambda x: x.aliquot
        # stepheat = sorted(stepheat, key=apred)
        # if stepheat:
        #     self._make_editor(stepheat, 'step_heat', options, prog, ln_root, li)
        project = 'J'
        # lns=[li.identifier for li in level.labnumbers]

        if fusion:
            save_args = (lroot, level, '{} {}'.format(irrad, level),
                         project, ids)
            self._make_editor(fusion, ('fusion', 'fusion_grouped'),
                              options, prog, 'aliquot',
                              save_args)
        prog.close()
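Every example on this page calls a two-way partition helper whose definition and import are not shown in the snippets. A minimal sketch consistent with the call sites (iterable first, a predicate keyword second, items for which the predicate is truthy returned first, both halves as lazy iterators) might look like the following; the exact name and behavior in the original project are assumptions here, not verified source.

from itertools import tee

try:
    from itertools import filterfalse  # Python 3
except ImportError:
    from itertools import ifilterfalse as filterfalse  # Python 2


def partition(items, predicate=bool):
    # Split items into (matching, non-matching) without walking the input twice.
    # Both halves are lazy iterators, which is why callers often wrap the result
    # in list() or map(list, ...) before reusing it.
    a, b = tee(items)
    return (x for x in a if predicate(x)), filterfalse(predicate, b)

The toy sketches in the notes below assume this definition.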
Example No. 2
    def group_level(self, level, irradiation=None, monitor_filter=None):
        if monitor_filter is None:
            def monitor_filter(pos):
                if pos.sample == 'FC-2':
                    return True

        db = self.db
        with db.session_ctx():
            if isinstance(level, str):
                level = db.get_level(level, irradiation)

            refs = []
            unks = []
            if level:
                positions = level.positions

                if positions:
                    def pos_factory(px):
                        ip = IrradiationPositionRecord()
                        ip.create(px)
                        return ip

                    positions = [pos_factory(pp) for pp in positions]
                    refs, unks = partition(positions, monitor_filter)

        return refs, unks
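The default monitor_filter above returns True for FC-2 positions and implicitly None for everything else; that is enough, because partition only tests truthiness. A standalone check of that behavior with toy sample names, using the partition sketch from the note under Example No. 1:

def monitor_filter(name):
    if name == 'FC-2':
        return True  # implicitly returns None for any other sample name


refs, unks = map(list, partition(['FC-2', 'MB-07', 'FC-2'], monitor_filter))
assert refs == ['FC-2', 'FC-2']
assert unks == ['MB-07']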
Example No. 3
    def _make_labnumber(self, li, root, options, prog):
        #make dir for labnumber
        ident = li.identifier
        ln_root = os.path.join(root, ident)
        r_mkdir(ln_root)

        prog.change_message('Making {} for {}'.format(self._tag, ident))

        #filter invalid analyses
        ans = filter(lambda x: not x.tag == 'invalid', li.analyses)

        #group by stepheat vs fusion
        pred = lambda x: bool(x.step)

        ans = sorted(ans, key=pred)
        stepheat, fusion = map(list, partition(ans, pred))

        apred = lambda x: x.aliquot
        stepheat = sorted(stepheat, key=apred)
        project = 'Minna Bluff'

        li = li.identifier
        if stepheat:
            key = lambda x: x.aliquot
            stepheat = sorted(stepheat, key=key)
            for aliquot, ais in groupby(stepheat, key=key):
                name = make_runid(li, aliquot, '')
                self._make_editor(ais, 'step_heat', options, prog, False,
                                  (ln_root, name, name, project, (li, )))
        if fusion:
            self._make_editor(fusion, 'fusion', options, prog, False,
                              (ln_root, li, li, project, (li, )))
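Note that partition itself does not need the sorted(ans, key=pred) call that precedes it here and in several other examples; with a tee/filter-style helper the split works on unsorted input and preserves the original order within each half. Sorting by the boolean key only matters when the result later feeds itertools.groupby. A minimal illustration with plain integers, again assuming the partition sketch above:

nums = [3, 8, 1, 6, 7]
odd, even = map(list, partition(nums, lambda n: n % 2))  # no prior sort needed
assert odd == [3, 1, 7]
assert even == [8, 6]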
Example No. 4
    def _load_associated_labnumbers(self, names):
        """
            names: list of project names
        """
        db = self.db
        sams = []
        with db.session_ctx():
            self._recent_mass_spectrometers = []
            warned = False

            rnames, onames = partition(names, lambda x: x.startswith('RECENT'))
            for name in rnames:
                # load associated samples
                if not self.search_criteria.recent_hours:
                    if not warned:
                        self.warning_dialog('Set "Recent Hours" in Preferences.\n'
                                            '"Recent Hours" is located in the "Processing" category')
                        warned = True
                else:
                    sams.extend(self._retrieve_recent_labnumbers(name))

            if list(onames):
                sams.extend(self._retrieve_labnumbers())

        self.samples = sams
        self.osamples = sams
Example No. 5
    def _make_paths(self, root, lpost=None, hpost=None):
        xs = [xi for xi in os.listdir(root) if not xi.startswith('.')]

        dirs, files = partition(xs, lambda x: not os.path.isfile(os.path.join(root, x)))

        if self.filter_hierarchy_str:
            files = (ci for ci in files if ci.startswith(self.filter_hierarchy_str))

        if self.date_filter == 'Modified':
            func = modified_datetime
        else:
            func = created_datetime

        if lpost:
            try:
                lpost = lpost.date()
            except AttributeError:
                pass
            files = (ci for ci in files
                     if func(os.path.join(root, ci), strformat=None).date() >= lpost)

        if hpost:
            try:
                hpost = hpost.date()
            except AttributeError:
                pass

            files = (ci for ci in files
                     if func(os.path.join(root, ci), strformat=None).date() <= hpost)

        files = [self._child_factory(ci) for ci in files]

        dirs = [self._directory_factory(di) for di in dirs]
        return dirs, files
Example No. 6
    def _make_level(self, doc, irrad, level, ids, ans):
        root = doc['root']
        options = doc['options']

        lroot = os.path.join(root, irrad, level)
        r_mkdir(lroot)

        n = len(ans)
        prog = self.open_progress(n, close_at_end=False)

        ans = self.make_analyses(ans, progress=prog, use_cache=False)
        #group by stepheat vs fusion
        pred = lambda x: bool(x.step)

        ans = sorted(ans, key=pred)
        stepheat, fusion = map(list, partition(ans, pred))

        # apred = lambda x: x.aliquot
        # stepheat = sorted(stepheat, key=apred)
        # if stepheat:
        #     self._make_editor(stepheat, 'step_heat', options, prog, ln_root, li)
        project = 'J'
        # lns=[li.identifier for li in level.labnumbers]

        if fusion:
            save_args = (lroot, level, '{} {}'.format(irrad,
                                                      level), project, ids)
            self._make_editor(fusion, ('fusion', 'fusion_grouped'), options,
                              prog, 'aliquot', save_args)
        prog.close()
Example No. 7
    def _make_paths(self, root, lpost=None, hpost=None):
        xs = [xi for xi in os.listdir(root) if not xi.startswith('.')]

        dirs, files = partition(xs, lambda x: not os.path.isfile(os.path.join(root, x)))

        if self.filter_hierarchy_str:
            files = (ci for ci in files if ci.startswith(self.filter_hierarchy_str))

        if self.date_filter == 'Modified':
            func = modified_datetime
        else:
            func = created_datetime

        if lpost:
            try:
                lpost = lpost.date()
            except AttributeError:
                pass
            files = (ci for ci in files
                     if func(os.path.join(root, ci), strformat=None).date() >= lpost)

        if hpost:
            try:
                hpost = hpost.date()
            except AttributeError:
                pass

            files = (ci for ci in files
                     if func(os.path.join(root, ci), strformat=None).date() <= hpost)

        files = [self._child_factory(ci) for ci in files]

        dirs = [self._directory_factory(di) for di in dirs]
        return dirs, files
Example No. 8
    def group_level(self, level, irradiation=None, monitor_filter=None):
        if monitor_filter is None:

            def monitor_filter(pos):
                if pos.sample == 'FC-2':
                    return True

        db = self.db
        with db.session_ctx():
            if isinstance(level, str):
                level = db.get_level(level, irradiation)

            refs = []
            unks = []
            if level:
                positions = level.positions

                if positions:

                    def pos_factory(px):
                        ip = IrradiationPositionRecord()
                        ip.create(px)
                        return ip

                    positions = [pos_factory(pp) for pp in positions]
                    refs, unks = partition(positions, monitor_filter)

        return refs, unks
Example No. 9
    def _load_associated_labnumbers(self, names):
        """
            names: list of project names
        """
        db = self.db
        sams = []
        with db.session_ctx():
            self._recent_mass_spectrometers = []
            warned = False

            rnames, onames = partition(names, lambda x: x.startswith('RECENT'))
            for name in rnames:
                # load associated samples
                if not self.search_criteria.recent_hours:
                    if not warned:
                        self.warning_dialog(
                            'Set "Recent Hours" in Preferences.\n'
                            '"Recent Hours" is located in the "Processing" category'
                        )
                        warned = True
                else:
                    sams.extend(self._retrieve_recent_labnumbers(name))

            if list(onames):
                sams.extend(self._retrieve_labnumbers())

        self.samples = sams
        self.osamples = sams
Example No. 10
    def _make_labnumber(self, li, root, options, prog):
        #make dir for labnumber
        ident = li.identifier
        ln_root = os.path.join(root, ident)
        r_mkdir(ln_root)

        prog.change_message('Making {} for {}'.format(self._tag, ident))

        #filter invalid analyses
        ans = filter(lambda x: not x.tag == 'invalid', li.analyses)

        #group by stepheat vs fusion
        pred = lambda x: bool(x.step)

        ans = sorted(ans, key=pred)
        stepheat, fusion = map(list, partition(ans, pred))

        apred = lambda x: x.aliquot
        stepheat = sorted(stepheat, key=apred)
        project = 'Minna Bluff'

        li = li.identifier
        if stepheat:
            key = lambda x: x.aliquot
            stepheat = sorted(stepheat, key=key)
            for aliquot, ais in groupby(stepheat, key=key):
                name = make_runid(li, aliquot, '')
                self._make_editor(ais, 'step_heat', options, prog, False,
                                  (ln_root, name, name, project, (li,)))
        if fusion:
            self._make_editor(fusion, 'fusion', options, prog, False,
                              (ln_root, li, li, project, (li,)))
Example No. 11
def make_sample_points(geo):
    db = geo.db
    pr = 'Minna Bluff'
    missing = []
    groups = []
    with db.session_ctx():
        v = db.get_project(pr)
        samples = v.samples
        yr1, yr2 = partition(samples, lambda x: x.name.startswith('MB06'))
        for name, sams in (('MB06_all_samples', yr1), ('MB07_all_samples',
                                                       yr2)):
            pts = []
            for s in sams:
                if not s.lat:
                    missing.append((s.name, s.alt_name))
                else:
                    print 'adding {} lat: {:0.5f} long: {:0.5f}'.format(
                        s.name, s.lat, s.long)
                    x, y = proj_pt(s.lat, s.long)

                    pt = SamplePoint(
                        x, y, s.name, s.material.name if s.material else '',
                        ','.join([li.identifier for li in s.labnumbers]))
                    pts.append(pt)
            groups.append((name, pts))
    return groups
Example No. 12
    def save(self, name=None):
        if name is None:
            name = self.name
        bfs, sfs = partition(self.fits, lambda x: x.is_baseline)
        yd = {'signal': self._dump(sfs), 'baseline': self._dump(bfs)}

        p = os.path.join(paths.fits_dir, '{}.yaml'.format(name))
        with open(p, 'w') as wfile:
            yaml.dump(yd, wfile, default_flow_style=False)
Example No. 13
    def save(self, name=None):
        if name is None:
            name = self.name
        bfs, sfs = partition(self.fits, lambda x: x.is_baseline)
        yd = {'signal': self._dump(sfs),
              'baseline': self._dump(bfs)}

        p = os.path.join(paths.fits_dir, '{}.yaml'.format(name))
        with open(p, 'w') as wfile:
            yaml.dump(yd, wfile, default_flow_style=False)
Example No. 14
    def _get_analyses(self, li):
        #filter invalid analyses
        ans = filter(lambda x: not x.tag == 'invalid', li.analyses)

        #group by stepheat vs fusion
        pred = lambda x: bool(x.step)

        ans = sorted(ans, key=pred)
        stepheat, fusion = map(list, partition(ans, pred))
        return stepheat
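Since _get_analyses only uses the step-heat half and discards fusion, a hypothetical simplification that skips the sort and partition entirely would be a plain comprehension; this is a sketch of an equivalent, not the project's code:

def get_stepheat_analyses(li):
    # keep analyses that are not tagged invalid and that have a step (i.e. step-heat)
    return [a for a in li.analyses if a.tag != 'invalid' and bool(a.step)]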
Example No. 15
    def _get_analyses(self, li):
        #filter invalid analyses
        ans = filter(lambda x: not x.tag == 'invalid', li.analyses)

        #group by stepheat vs fusion
        pred = lambda x: bool(x.step)

        ans = sorted(ans, key=pred)
        stepheat, fusion = map(list, partition(ans, pred))
        return stepheat
Example No. 16
    def _get_positions(self):

        hkey = lambda x: x.hole_id
        if self.tool.group_positions:
            mons, unks = map(list, partition(self.monitor_positions.itervalues(),
                                             lambda x: x.sample == self.tool.monitor.sample))
            return [ai for ps in (mons, unks)
                    for ai in sorted(ps, key=hkey)]

        else:
            return sorted(self.monitor_positions.itervalues(), key=hkey)
Example No. 17
    def _get_positions(self):

        hkey = lambda x: x.hole_id
        if self.tool.group_positions:
            mons, unks = map(
                list,
                partition(self.monitor_positions.itervalues(),
                          lambda x: x.sample == self.tool.monitor.sample))
            return [ai for ps in (mons, unks) for ai in sorted(ps, key=hkey)]

        else:
            return sorted(self.monitor_positions.itervalues(), key=hkey)
Example No. 18
    def run(self, state):
        dvc = self.dvc
        rs = []
        for ri in self.repositories:
            ans = dvc.get_repoository_analyses(ri)
            rs.extend(ans)

        unks, refs = partition(
            rs, predicate=lambda x: x.analysis_type == 'unknown')
        state.unknowns = unks
        state.references = refs
        self.unknowns = unks
        self.references = refs
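If partition returns lazy iterators (as the list() and map(list, ...) wrappers elsewhere on this page suggest), state.unknowns and self.unknowns are bound to the same iterator object here, and likewise for the references; whichever consumer iterates first leaves it empty for the other. A hedged variant that materializes both halves before assigning them:

        unks, refs = map(list, partition(rs, predicate=lambda x: x.analysis_type == 'unknown'))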
Example No. 19
    def _dclicked_analysis_group_changed(self):
        if self.active_editor:
            if self.selected_analysis_groups:
                g = self.selected_analysis_groups[0]
                db = self.manager.db

                with db.session_ctx():
                    dbg = db.get_analysis_group(g.id, key='id')
                    unks, b = partition(dbg.analyses, lambda x: x.analysis_type.name == 'unknown')

                    self.active_editor.auto_find = False
                    self.active_editor.set_items([ai.analysis for ai in unks])
                    self._dclicked_analysis_group_hook(unks, b)
Example No. 20
def renumber_aliquots(aruns):
    akey = attrgetter('user_defined_aliquot')

    for ln, ans in groupby_key(aruns, 'labnumber'):
        if is_special(ln):
            continue

        b, a = partition(ans, akey)
        b = list(b)
        if b:
            minaliquot = min([bi.user_defined_aliquot for bi in b])
            for i, (al, ans) in enumerate(groupby(b, key=akey)):
                for ai in ans:
                    ai.user_defined_aliquot = minaliquot + i
Example No. 21
    def randomize_unknowns(self):
        """
        1. get indices of non unknowns
        2. partition into two lists unks, non-unks
        3. randomize unks
        4. insert non-unks back in using original indices

        :return:
        """

        aruns = self.automated_runs[:]

        def predicate(x):
            return not x.skip

        skip_idx = [i for i, a in enumerate(aruns) if not predicate(a)]

        aruns, skipped = partition(aruns, predicate=predicate)

        def predicate(x):
            return x.analysis_type == 'unknown'

        idx = [i for i, a in enumerate(aruns) if not predicate(a)]

        unks, refs = partition(aruns, predicate=predicate)

        unks = list(unks)
        refs = list(refs)
        for i, r in list(zip(idx, refs)):
            unks.insert(i, r)

        for i, r in list(zip(skip_idx, skipped)):
            unks.insert(i, r)

        self.automated_runs = unks
        self.refresh_table_needed = True
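The re-insertion step works because partition preserves the relative order of each half, so the indices captured before the split still point at the right slots afterwards. A small worked illustration with hypothetical run labels instead of real run objects (shuffling omitted), using the partition sketch above:

runs = ['u1', 'blank', 'u2', 'air', 'u3']
ref_idx = [i for i, r in enumerate(runs) if not r.startswith('u')]  # [1, 3]
unks, refs = map(list, partition(runs, lambda r: r.startswith('u')))
for i, r in zip(ref_idx, refs):
    unks.insert(i, r)
assert unks == ['u1', 'blank', 'u2', 'air', 'u3']  # non-unknowns back in place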
Example No. 22
    def _dclicked_analysis_group_changed(self):
        if self.active_editor:
            if self.browser_model.selected_analysis_groups:
                g = self.browser_model.selected_analysis_groups[0]
                db = self.manager.db

                with db.session_ctx():
                    dbg = db.get_analysis_group(g.id, key='id')
                    unks, b = partition(
                        dbg.analyses,
                        lambda x: x.analysis_type.name == 'unknown')

                    self.active_editor.auto_find = False
                    self.active_editor.set_items([ai.analysis for ai in unks])
                    self._dclicked_analysis_group_hook(unks, b)
Example No. 23
    def _retrieve_labnumbers(self):
        es = []
        ps = []
        ms = []
        if self.mass_spectrometers_enabled:
            if self.mass_spectrometer_includes:
                ms = self.mass_spectrometer_includes

        principal_investigator = None
        if self.principal_investigator_enabled:
            principal_investigator = self.principal_investigator

        if self.repository_enabled:
            if self.selected_repositories:
                es = [e.name for e in self.selected_repositories]
        if self.project_enabled:
            if self.selected_projects:
                rs, ps = partition([p.name for p in self.selected_projects],
                                   lambda x: x.startswith('RECENT'))
                ps, rs = list(ps), list(rs)
                if rs:
                    hpost = datetime.now()
                    lpost = hpost - timedelta(
                        hours=self.search_criteria.recent_hours)
                    self._low_post = lpost

                    self.use_high_post = False
                    self.use_low_post = True

                    self.trait_property_changed('low_post', self._low_post)
                    for ri in rs:
                        mi = extract_mass_spectrometer_name(ri)
                        if mi not in ms:
                            ms.append(mi)
                        self._recent_mass_spectrometers.append(mi)

        ls = self.db.get_labnumbers(
            principal_investigator=principal_investigator,
            projects=ps,
            repositories=es,
            mass_spectrometers=ms,
            irradiation=self.irradiation if self.irradiation_enabled else None,
            level=self.level if self.irradiation_enabled else None,
            analysis_types=self.analysis_include_types
            if self.use_analysis_type_filtering else None,
            high_post=self.high_post if self.use_high_post else None,
            low_post=self.low_post if self.use_low_post else None)
        return ls
Example No. 24
    def dump_fits(self, keys, reviewed=False):

        sisos = self.isotopes
        isoks, dks = map(tuple, partition(keys, lambda x: x in sisos))

        def update(d, i):
            fd = i.filter_outliers_dict
            d.update(fit=i.fit,
                     value=float(i.value),
                     error=float(i.error),
                     n=i.n,
                     fn=i.fn,
                     include_baseline_error=i.include_baseline_error,
                     filter_outliers=fd.get('filter_outliers', False),
                     iterations=fd.get('iterations', 0),
                     std_devs=fd.get('std_devs', 0))

        # save intercepts
        if isoks:
            isos, path = self._get_json('intercepts')
            for k in isoks:
                try:
                    iso = isos[k]
                    siso = sisos[k]
                    update(iso, siso)
                except KeyError:
                    pass

            isos['reviewed'] = reviewed
            self._dump(isos, path)

        # save baselines
        if dks:
            baselines, path = self._get_json('baselines')
            for di in dks:
                try:
                    det = baselines[di]
                except KeyError:
                    det = {}
                    baselines[di] = det

                bs = next(
                    (iso.baseline
                     for iso in sisos.itervalues() if iso.detector == di),
                    None)
                update(det, bs)

            self._dump(baselines, path)
Example No. 25
def renumber_aliquots(aruns):
    key = lambda x: x.labnumber
    akey = lambda x: x.user_defined_aliquot

    aruns = sorted(aruns, key=key)
    for ln, ans in groupby(aruns, key=key):
        if is_special(ln):
            continue

        b, a = partition(ans, akey)
        b = list(b)
        if b:
            minaliquot = min([bi.user_defined_aliquot for bi in b])
            for i, (al, ans) in enumerate(groupby(b, key=akey)):
                for ai in ans:
                    ai.user_defined_aliquot = minaliquot + i
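itertools.groupby only merges adjacent items, so the renumbering above assumes runs that share a user_defined_aliquot arrive next to each other in the partitioned list. A standalone check of the renumbering pattern with toy aliquot numbers:

from itertools import groupby

vals = [5, 5, 9, 9, 9, 12]  # already adjacent per group
base = min(vals)
renumbered = [base + i for i, (_, grp) in enumerate(groupby(vals)) for _ in grp]
assert renumbered == [5, 5, 6, 6, 6, 7]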
Example No. 26
    def save_analysis_data_table(self, p):


        # ans=[]
        db = self.processor.db

        with db.session_ctx():
            ias = self.interpreted_ages[:10]

            ans = [si.analysis for ia in ias
                   for si in db.get_interpreted_age_history(ia.id).interpreted_age.sets
                   if si.analysis.tag != 'invalid']
            prog = self.processor.open_progress(len(ans), close_at_end=False)
            # hid = db.get_interpreted_age_history(ia.id)
            # dbia = hid.interpreted_age
            # ans.extend([si.analysis for si in db.get_interpreted_age_history(ia.id).interpreted_age.sets
            #         if not si.analysis.tag == 'invalid'])

            # groups=[]

            def gfactory(klass, ia):
                hid = db.get_interpreted_age_history(ia.id)
                ans = (si.analysis for si in hid.interpreted_age.sets
                       if not si.analysis.tag == 'invalid')
                ans = self.processor.make_analyses(ans,
                                                   calculate_age=True, use_cache=False,
                                                   progress=prog)
                return klass(sample=ans[0].sample, analyses=ans)

            #partition fusion vs stepheat
            fusion, step_heat = partition(ias, lambda x: x.age_kind == 'Weighted Mean')
            # for ia in step_heat:
            #     groups.append(klass(sample=ans[0].sample,
            #                         analyses=ans))
            shgroups = [gfactory(StepHeatAnalysisGroup, ia) for ia in step_heat]
            fgroups = [gfactory(AnalysisGroup, ia) for ia in fusion]
            prog.close()

        head, ext = os.path.splitext(p)
        if shgroups:
            w = StepHeatPDFTableWriter()
            p = '{}.step_heat_data{}'.format(head, ext)
            w.build(p, shgroups, title=self.get_title())
        if fgroups:
            w = FusionPDFTableWriter()
            p = '{}.fusion_data{}'.format(head, ext)
            w.build(p, fgroups, title=self.get_title())
Example No. 27
    def dump_fits(self, keys, reviewed=False):

        sisos = self.isotopes
        isoks, dks = list(map(tuple, partition(keys, lambda x: x in sisos)))

        def update(d, i):
            d.update(fit=i.fit, value=float(i.value), error=float(i.error),
                     n=i.n, fn=i.fn,
                     reviewed=reviewed,
                     include_baseline_error=i.include_baseline_error,
                     filter_outliers_dict=i.filter_outliers_dict,
                     user_excluded=i.user_excluded,
                     outlier_excluded=i.outlier_excluded)

        # save intercepts
        if isoks:
            isos, path = self._get_json('intercepts')
            for k in isoks:
                try:
                    iso = isos[k]
                except KeyError:
                    iso = {}
                    isos[k] = iso

                siso = sisos[k]
                if siso:
                    update(iso, siso)

            self._dump(isos, path)

        # save baselines
        if dks:
            baselines, path = self._get_json('baselines')
            for di in dks:
                try:
                    det = baselines[di]
                except KeyError:
                    det = {}
                    baselines[di] = det

                # bs = next((iso.baseline for iso in six.itervalues(sisos) if iso.detector == di), None)
                bs = self.get_isotope(detector=di, kind='baseline')
                if bs:
                    update(det, bs)

            self._dump(baselines, path)
Example No. 28
    def save_analysis_data_table(self, p):

        # ans=[]
        db = self.processor.db

        with db.session_ctx():
            ias = self.interpreted_ages[:10]

            ans = [
                si.analysis
                for ia in ias
                for si in db.get_interpreted_age_history(ia.id).interpreted_age.sets
                if si.analysis.tag != "invalid"
            ]
            prog = self.processor.open_progress(len(ans), close_at_end=False)
            # hid = db.get_interpreted_age_history(ia.id)
            # dbia = hid.interpreted_age
            # ans.extend([si.analysis for si in db.get_interpreted_age_history(ia.id).interpreted_age.sets
            #         if not si.analysis.tag == 'invalid'])

            # groups=[]

            def gfactory(klass, ia):
                hid = db.get_interpreted_age_history(ia.id)
                ans = (si.analysis for si in hid.interpreted_age.sets if not si.analysis.tag == "invalid")
                ans = self.processor.make_analyses(ans, calculate_age=True, use_cache=False, progress=prog)
                return klass(sample=ans[0].sample, analyses=ans)

            # partition fusion vs stepheat
            fusion, step_heat = partition(ias, lambda x: x.age_kind == "Weighted Mean")
            # for ia in step_heat:
            #     groups.append(klass(sample=ans[0].sample,
            #                         analyses=ans))
            shgroups = [gfactory(StepHeatAnalysisGroup, ia) for ia in step_heat]
            fgroups = [gfactory(AnalysisGroup, ia) for ia in fusion]
            prog.close()

        head, ext = os.path.splitext(p)
        if shgroups:
            w = StepHeatPDFTableWriter()
            p = "{}.step_heat_data{}".format(head, ext)
            w.build(p, shgroups, title=self.get_title())
        if fgroups:
            w = FusionPDFTableWriter()
            p = "{}.fusion_data{}".format(head, ext)
            w.build(p, fgroups, title=self.get_title())
Example No. 29
    def _retrieve_labnumbers(self):
        es = []
        ps = []
        ms = []
        if self.mass_spectrometers_enabled:
            if self.mass_spectrometer_includes:
                ms = self.mass_spectrometer_includes

        principal_investigator = None
        if self.principal_investigator_enabled:
            principal_investigator = self.principal_investigator

        if self.repository_enabled:
            if self.selected_repositories:
                es = [e.name for e in self.selected_repositories]
        if self.project_enabled:
            if self.selected_projects:
                rs, ps = partition([p.name for p in self.selected_projects], lambda x: x.startswith('RECENT'))
                ps, rs = list(ps), list(rs)
                if rs:
                    hpost = datetime.now()
                    lpost = hpost - timedelta(hours=self.search_criteria.recent_hours)
                    self._low_post = lpost

                    self.use_high_post = False
                    self.use_low_post = True

                    self.trait_property_changed('low_post', self._low_post)
                    for ri in rs:
                        mi = extract_mass_spectrometer_name(ri)
                        if mi not in ms:
                            ms.append(mi)
                        self._recent_mass_spectrometers.append(mi)

        ls = self.db.get_labnumbers(principal_investigator=principal_investigator,
                                    projects=ps, repositories=es,
                                    mass_spectrometers=ms,
                                    irradiation=self.irradiation if self.irradiation_enabled else None,
                                    level=self.level if self.irradiation_enabled else None,
                                    analysis_types=self.analysis_include_types if self.use_analysis_type_filtering else None,
                                    high_post=self.high_post if self.use_high_post else None,
                                    low_post=self.low_post if self.use_low_post else None)
        return ls
Example No. 30
    def dump_fits(self, keys, reviewed=False):

        sisos = self.isotopes
        isoks, dks = list(map(tuple, partition(keys, lambda x: x in sisos)))

        def update(d, i):
            fd = i.filter_outliers_dict
            d.update(fit=i.fit, value=float(i.value), error=float(i.error),
                     n=i.n, fn=i.fn,
                     reviewed=reviewed,
                     include_baseline_error=i.include_baseline_error,
                     filter_outliers_dict=fd)

        # save intercepts
        if isoks:
            isos, path = self._get_json('intercepts')
            for k in isoks:
                try:
                    iso = isos[k]
                    siso = sisos[k]
                    if siso:
                        update(iso, siso)
                except KeyError:
                    pass

            self._dump(isos, path)

        # save baselines
        if dks:
            baselines, path = self._get_json('baselines')
            for di in dks:
                try:
                    det = baselines[di]
                except KeyError:
                    det = {}
                    baselines[di] = det

                # bs = next((iso.baseline for iso in six.itervalues(sisos) if iso.detector == di), None)
                bs = self.get_isotope(detector=di, kind='baseline')
                if bs:
                    update(det, bs)

            self._dump(baselines, path)
Example No. 31
    def dump_fits(self, keys, reviewed=False):

        sisos = self.isotopes
        isoks, dks = map(tuple, partition(keys, lambda x: x in sisos))

        def update(d, i):
            fd = i.filter_outliers_dict
            d.update(fit=i.fit, value=float(i.value), error=float(i.error),
                     n=i.n, fn=i.fn,
                     include_baseline_error=i.include_baseline_error,
                     filter_outliers=fd.get('filter_outliers', False),
                     iterations=fd.get('iterations', 0),
                     std_devs=fd.get('std_devs', 0))

        # save intercepts
        if isoks:
            isos, path = self._get_json('intercepts')
            for k in isoks:
                try:
                    iso = isos[k]
                    siso = sisos[k]
                    update(iso, siso)
                except KeyError:
                    pass

            isos['reviewed'] = reviewed
            self._dump(isos, path)

        # save baselines
        if dks:
            baselines, path = self._get_json('baselines')
            for di in dks:
                try:
                    det = baselines[di]
                except KeyError:
                    det = {}
                    baselines[di] = det

                bs = next((iso.baseline for iso in sisos.itervalues() if iso.detector == di), None)
                update(det, bs)

            self._dump(baselines, path)
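The dump_fits variants on this page differ mainly in Python 2 versus 3 idioms: sisos.itervalues() is Python 2 only, while the list(map(...)) wrapper seen in other copies is harmless on both versions, since unpacking two values from a lazy map object also works on Python 3. A version-neutral spelling of the key/detector split, shown with toy stand-ins for keys and self.isotopes:

keys = ['Ar40', 'Ar39', 'L2']  # hypothetical isotope and detector keys
sisos = {'Ar40': object(), 'Ar39': object()}  # stand-in for self.isotopes
isoks, dks = (tuple(g) for g in partition(keys, lambda k: k in sisos))
assert isoks == ('Ar40', 'Ar39')
assert dks == ('L2',)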
Example No. 32
    def _assemble_groups(self, ias):
        db = self.processor.db

        with db.session_ctx():
            # ias = [ia for ia in ias if ia.age_kind == 'Weighted Mean'][:1]

            ans = [si.analysis for ia in ias
                   for si in db.get_interpreted_age_history(ia.id).interpreted_age.sets]

            prog = self.processor.open_progress(len(ans), close_at_end=False)

            def gfactory(klass, dbia):
                hid = db.get_interpreted_age_history(dbia.id)
                ia_ans = (si.analysis for si in hid.interpreted_age.sets)
                all_ans = self.processor.make_analyses(ia_ans,
                                                       calculate_age=True,
                                                       use_cache=False,
                                                       progress=prog)
                #overwrite the tags for the analyses
                for ai, sai in zip(all_ans, ia_ans):
                    ai.set_tag(sai.tag)

                ais = [ai for ai in all_ans if not 'omit' in ai.tag]
                return klass(sample=ais[0].sample,
                             all_analyses=all_ans,
                             analyses=ais)

            #partition fusion vs stepheat
            fusion, step_heat = partition(ias, lambda x: x.age_kind == 'Weighted Mean')

            shgroups = [(ia, gfactory(StepHeatAnalysisGroup, ia)) for ia in step_heat]
            # shgroups = [(ia, gfactory(StepHeatAnalysisGroup, ia)) for ia in list(step_heat)[:3]]
            # shgroups =[]

            # fgroups = [(ia, gfactory(AnalysisGroup, ia)) for ia in fusion]
            # fgroups = [(ia, gfactory(AnalysisGroup, ia)) for ia in list(fusion)[:3]]
            fgroups = []

            prog.close()

        return shgroups, fgroups
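One thing to watch in gfactory above: ia_ans is a generator, so if make_analyses iterates it, the later zip(all_ans, ia_ans) sees an already-exhausted iterator and sets no tags; building a list first avoids that. A standalone illustration of the pitfall:

src = (n for n in range(3))  # a generator, like ia_ans above
consumed = list(src)         # something downstream iterates it once
assert list(zip(consumed, src)) == []  # a second pass over the generator is empty
assert list(zip(consumed, consumed)) == [(0, 0), (1, 1), (2, 2)]  # a list can be reused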
Example No. 33
    def save_analysis_data_tables(self, root, pdf=True, xls=True,
                                  xls_summary=False,
                                  auto_view=False):

        ias = self.interpreted_ages
        db = self.processor.db
        with db.session_ctx():
            #partition into map/argus
            def pred(ia):
                hid = db.get_interpreted_age_history(ia.id)
                ref = hid.interpreted_age.sets[0].analysis
                return ref.measurement.mass_spectrometer.name.lower() == 'map'

            part = partition(ias, pred)
            map_spec, argus = map(list, part)

        if pdf:
            self.debug('saving pdf tables')
            step_heat_title = 'Table E.1 MAP Step Heat <sup>40</sup>Ar/<sup>39</sup>Ar Data'
            fusion_title = 'Table D.1 MAP Fusion <sup>40</sup>Ar/<sup>39</sup>Ar Data'
            # self._save_pdf_data_table(root, map_spec, step_heat_title, fusion_title, 'map',
            #                           auto_view=auto_view)

            step_heat_title = 'Table G.1 Argus Step Heat <sup>40</sup>Ar/<sup>39</sup>Ar Data'
            fusion_title = 'Table F.1 Argus Fusion <sup>40</sup>Ar/<sup>39</sup>Ar Data'
            self._save_pdf_data_table(root, argus, step_heat_title, fusion_title, 'argus',
                                      auto_view=auto_view)
        if xls:
            self.debug('saving xls tables')
            step_heat_title = 'Table 1. MAP Step heat 40Ar/39Ar Data'
            fusion_title = 'Table 2. MAP Fusion 40Ar/39Ar Data'
            self._save_xls_data_table(root, map_spec, step_heat_title, fusion_title, 'map',
                                      summary_sheet=xls_summary,
                                      auto_view=auto_view)

            step_heat_title = 'Table 3. Argus Step heat 40Ar/39Ar  Data'
            fusion_title = 'Table 4. Argus Fusion 40Ar/39Ar Data'
            self._save_xls_data_table(root, argus, step_heat_title, fusion_title, 'argus',
                                      summary_sheet=xls_summary,
                                      auto_view=auto_view)
Example No. 34
    def _make_multi_panel_labnumbers(self, ans, cnt):

        root = self._config_options['root']
        options = self._config_options['options']
        # ans = [ai for li in lns for ai in li.analyses]
        # ans = filter(lambda x: not x.tag == 'invalid', ans)
        # prog = self.open_progress(len(ans), close_at_end=False)
        # ans = self.make_analyses(ans,
        #                          progress=prog,
        #                          use_cache=False)
        # print lns
        lns = list({ai.labnumber.identifier for ai in ans})
        print len(lns)
        prog = None
        pred = lambda x: bool(x.step)

        ident = ','.join([li for li in lns])
        li = ident

        ident = '{:03d}-{}'.format(cnt, ident)
        ln_root = os.path.join(root, ident)
        r_mkdir(ln_root)
        ans = sorted(ans, key=pred)
        stepheat, fusion = map(list, partition(ans, pred))
        project = 'Minna Bluff'
        if stepheat and options.has_key('step_heat'):
            # key = lambda x: x.aliquot
            # stepheat = sorted(stepheat, key=key)
            # for aliquot, ais in groupby(stepheat, key=key):
            # name = make_runid(li, aliquot, '')
            self._make_editor(
                ans, 'step_heat', options, prog, False,
                lambda x: '{}-{}'.format(x.identifier, x.aliquot),
                (ln_root, 'spec', li, project, (li, )))

        if fusion and options.has_key('fusion'):
            self._make_editor(fusion, 'fusion', options, prog, False,
                              lambda x: x.identifier,
                              (ln_root, 'fig', li, project, (li, )))
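options.has_key(...) above only exists on Python 2; the in operator is the equivalent membership test on both Python 2 and 3:

options = {'fusion': {}}  # toy stand-in for the options dict
assert 'fusion' in options
assert 'step_heat' not in options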
Example No. 35
def make_sample_points(geo):
    db = geo.db
    pr = 'Minna Bluff'
    missing = []
    groups = []
    with db.session_ctx():
        v = db.get_project(pr)
        samples = v.samples
        yr1, yr2 = partition(samples, lambda x: x.name.startswith('MB06'))
        for name, sams in (('MB06_all_samples', yr1),
                           ('MB07_all_samples', yr2)):
            pts = []
            for s in sams:
                if not s.lat:
                    missing.append((s.name, s.alt_name))
                else:
                    print 'adding {} lat: {:0.5f} long: {:0.5f}'.format(s.name, s.lat, s.long)
                    x, y = proj_pt(s.lat, s.long)

                    pt = SamplePoint(x, y, s.name, s.material.name if s.material else '',
                                     ','.join([li.identifier for li in s.labnumbers]))
                    pts.append(pt)
            groups.append((name, pts))
    return groups
Example No. 36
    def _calculate_cached_result_points(self, comp):
        choles, fholes = partition(self.results, lambda r: r[1])

        cpts = comp.map_screen([(x, y) for (x, y), _ in choles])
        fpts = comp.map_screen([(x, y) for (x, y), _ in fholes])
        self._cached_result_pts = (cpts, (0, 1, 1), 2), (fpts, (1, 0, 0), 3)
Example No. 37
    def group_labnumbers(self, ls):
        def monitor_filter(pos):
            return pos.sample.name == 'FC-2'

        return partition(ls, monitor_filter)
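A standalone sketch of the FC-2 split that group_labnumbers performs, with namedtuple stand-ins for the database position objects and the partition sketch from the first note; the sample names are illustrative only:

from collections import namedtuple

Sample = namedtuple('Sample', 'name')
Position = namedtuple('Position', 'sample')

positions = [Position(Sample('FC-2')), Position(Sample('MB-123')), Position(Sample('FC-2'))]
monitors, unknowns = map(list, partition(positions, lambda p: p.sample.name == 'FC-2'))
assert len(monitors) == 2 and len(unknowns) == 1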
Example No. 38
    def _calculate_cached_result_points(self, comp):
        choles, fholes = partition(self.results, lambda r: r[1])

        cpts = comp.map_screen([(x, y) for (x, y), _ in choles])
        fpts = comp.map_screen([(x, y) for (x, y), _ in fholes])
        self._cached_result_pts = (cpts, (0, 1, 1), 2), (fpts, (1, 0, 0), 3)
Example No. 39
    def make_analyses(self, ans,
                      progress=None,
                      exclude=None,
                      use_cache=True,
                      **kw):
        """
            loading the analysis' signals appears to be the most expensive operation.
            the majority of the load time is in _construct_analysis
        """

        if exclude:
            ans = self.filter_analysis_tag(ans, exclude)

        if not ans:
            return []

        db = self.db
        with db.session_ctx():
            if ans:
                #partition into DBAnalysis vs IsotopeRecordView
                db_ans, no_db_ans = map(list, partition(ans, lambda x: isinstance(x, DBAnalysis)))

                if no_db_ans:
                    #partition into cached and non cached analyses
                    cached_ans, no_db_ans = partition(no_db_ans,
                                                      lambda x: x.uuid in ANALYSIS_CACHE)

                    cached_ans = list(cached_ans)

                    #add analyses from cache to db_ans
                    db_ans.extend([ANALYSIS_CACHE[ci.uuid] for ci in cached_ans])

                    #increment value in cache_count
                    for ci in cached_ans:
                        self._add_to_cache(ci)

                    #load remaining analyses
                    no_db_ans = list(no_db_ans)
                    n = len(no_db_ans)
                    if n:

                        if self.use_vcs:
                            #clone the necessary project repositories
                            def f(x):
                                try:
                                    return x.labnumber.sample.project.name
                                except AttributeError:
                                    pass
                            prs = filter(lambda x: x is not None, (f(ai) for ai in no_db_ans))
                            self.vcs.clone_project_repos(prs)

                        if n > 1:
                            if progress is not None:
                                if progress.max < (n + progress.get_value()):
                                    progress.increase_max(n+2)
                            else:
                                progress = self._open_progress(n+2)

                        new_ans = []
                        for i, ai in enumerate(no_db_ans):
                            if progress:
                                if progress.canceled:
                                    self.debug('canceling make analyses')
                                    db_ans = []
                                    new_ans = []
                                    break
                                elif progress.accepted:
                                    self.debug('accepting {}/{} analyses'.format(i, n))
                                    break

                            a = self._construct_analysis(ai, progress, **kw)
                            if a:
                                if use_cache:
                                    self._add_to_cache(a)
                                new_ans.append(a)

                                # if progress:
                                #     progress.on_trait_change(self._progress_closed,
                                #                              'closed', remove=True)

                        db_ans.extend(new_ans)

                        # self.debug('use vcs {}'.format(self.use_vcs))
                        # if self.use_vcs:
                        #     if progress:
                        #         progress.increase_max(len(new_ans)+1)
                        #         progress.change_message('Adding analyses to vcs')
                        #
                        #     self.vcs.add_analyses(new_ans, progress=progress)

                        self.debug('use offline database {}'.format(self.use_offline_database))
                        if self.use_offline_database:
                            if progress:
                                progress.increase_max(len(new_ans)+1)
                                progress.change_message('Transferring analyses for offline usage')
                            self.offline_bridge.add_analyses(db, new_ans, progress=progress)

                if progress:
                    progress.soft_close()

                return db_ans
Example No. 40
    def _easy_func(self, ep, manager):
        db = self.manager.db

        doc = ep.doc('blanks')
        fits = doc['blank_fit_isotopes']
        projects = doc['projects']

        unks = [ai for proj in projects
                for si in db.get_samples(project=proj)
                for ln in si.labnumbers
                for ai in ln.analyses
                if ai.measurement.mass_spectrometer.name == 'MAP'
            and ai.extraction.extraction_device.name in ('Furnace', 'Eurotherm')]
        # for proj in projects:
        #     for si in db.get_samples(project=proj):
        #         for ln in si.labnumbers:
        #             for ai in ln.analyses:
        #                 print ai.measurement.mass_spectrometer.name,ai.extraction.extraction_device.name
        #                 print ai.measurement.mass_spectrometer.name == 'nmgrl map' and ai.extraction.extraction_device.name in ('Furnace','Eurotherm')
        print len(unks)
        prog = manager.progress
        # prog = self.manager.open_progress(len(ans) + 1)
        #bin analyses
        prog.increase_max(len(unks))

        preceding_fits, non_preceding_fits = map(list, partition(fits, lambda x: x['fit'] == 'preceding'))
        if preceding_fits:
            for ai in unks:
                if prog.canceled:
                    return
                elif prog.accepted:
                    break
                l, a, s = ai.labnumber.identifier, ai.aliquot, ai.step
                prog.change_message('Save preceding blank for {}-{:02n}{}'.format(l, a, s))
                hist = db.add_history(ai, 'blanks')
                ai.selected_histories.selected_blanks = hist
                for fi in preceding_fits:
                    self._preceding_correct(db, fi, ai, hist)

        #make figure root dir
        if doc['save_figures']:
            root = doc['figure_root']
            r_mkdir(root)

        with no_auto_ctx(self.active_editor):
            if non_preceding_fits:
                for fi in self.active_editor.tool.fits:
                    fi.fit = 'average'
                    fi.error_type = 'SEM'
                    fi.filter_outliers = True
                    fi.filter_iterations = 1
                    fi.filter_std_devs = 2

                for ais in bin_analyses(unks):
                    if prog.canceled:
                        return
                    elif prog.accepted:
                        break

                    self.active_editor.set_items(ais, progress=prog)
                    self.active_editor.find_references(progress=prog)

                    #refresh graph
                    # invoke_in_main_thread(self.active_editor.rebuild_graph)
                    #
                    # if not manager.wait_for_user():
                    #     return

                    #save a figure
                    if doc['save_figures']:
                        title = self.active_editor.make_title()
                        p = os.path.join(root, add_extension(title, '.pdf'))
                        self.active_editor.save_file(p)

                    self.active_editor.save(progress=prog)

                    self.active_editor.dump_tool()
        return True
Example No. 41
    def make_analyses(self, ans,
                      progress=None,
                      use_progress=True,
                      exclude=None,
                      use_cache=True,
                      unpack=False,
                      calculate_age=False,
                      calculate_F=False,
                      load_aux=False,
                      **kw):
        """
            loading the analysis' signals appears to be the most expensive operation.
            the majority of the load time is in _construct_analysis
        """
        if exclude:
            ans = self.filter_analysis_tag(ans, exclude)

        if not ans:
            self.debug('no analyses to load')
            return []

        db = self.db
        with db.session_ctx():
            # partition into DBAnalysis vs IsotopeRecordView
            db_ans, no_db_ans = map(list, partition(ans, lambda x: isinstance(x, DBAnalysis)))
            self._calculate_cached_ages(db_ans, calculate_age, calculate_F)
            if unpack:
                for di in db_ans:
                    if not di.has_raw_data:
                        no_db_ans.append(di)
                        db_ans.remove(di)

            if load_aux:
                for di in db_ans:
                    if not di.has_changes:
                        if di not in no_db_ans:
                            no_db_ans.append(di)
                        db_ans.remove(di)

            if no_db_ans:
                if use_cache:
                    # partition into cached and non cached analyses
                    cached_ans, no_db_ans = partition(no_db_ans,
                                                      lambda x: x.uuid in ANALYSIS_CACHE)
                    cached_ans = list(cached_ans)
                    no_db_ans = list(no_db_ans)

                    cns = [ANALYSIS_CACHE[ci.uuid] for ci in cached_ans]

                    # if unpack is true make sure cached analyses have raw data
                    if unpack or load_aux:
                        if unpack:
                            a, b = self._unpack_cached_analyses(cns, calculate_age, calculate_F)
                            db_ans.extend(a)
                            no_db_ans.extend(b)
                        if load_aux:
                            a, b = self._load_aux_cached_analyses(cns)
                            db_ans.extend(a)
                            no_db_ans.extend(b)
                    else:
                        self._calculate_cached_ages(cns, calculate_age, calculate_F)
                        # add analyses from cache to db_ans
                        db_ans.extend(cns)

                    # increment value in cache_count
                    self._increment_cache(cached_ans, use_cache)

                # load remaining analyses
                n = len(no_db_ans)
                if n:
                    # self._clone_vcs_repos(no_db_ans)
                    progress = self._setup_progress(n, progress, use_progress)
                    db_ans, new_ans = self._construct_analyses(no_db_ans, db_ans, progress,
                                                               calculate_age, calculate_F,
                                                               unpack, use_cache,
                                                               load_aux=load_aux, **kw)
                    db_ans.extend(new_ans)

                    # self.debug('use vcs {}'.format(self.use_vcs))
                    # if self.use_vcs:
                    # if progress:
                    #         progress.increase_max(len(new_ans)+1)
                    #         progress.change_message('Adding analyses to vcs')
                    #
                    #     self.vcs.add_analyses(new_ans, progress=progress)

                    # self.debug('use offline database {}'.format(self.use_offline_database))
                    # if self.use_offline_database:
                    #     if progress:
                    #         progress.increase_max(len(new_ans) + 1)
                    #         progress.change_message('Transfering analyses for offline usage')
                    #     self.offline_bridge.add_analyses(db, new_ans, progress=progress)

            if progress:
                progress.soft_close()
        
            return db_ans
Example No. 42
    def group_labnumbers(self, ls):
        def monitor_filter(pos):
            return pos.sample.name == 'FC-2'

        return partition(ls, monitor_filter)
Example No. 43
    def make_analyses(self,
                      ans,
                      progress=None,
                      exclude=None,
                      use_cache=True,
                      **kw):
        """
            loading the analysis' signals appears to be the most expensive operation.
            the majority of the load time is in _construct_analysis
        """

        if exclude:
            ans = self.filter_analysis_tag(ans, exclude)

        if not ans:
            return []

        db = self.db
        with db.session_ctx():
            if ans:
                #partition into DBAnalysis vs IsotopeRecordView
                db_ans, no_db_ans = map(
                    list, partition(ans, lambda x: isinstance(x, DBAnalysis)))

                if no_db_ans:
                    #partition into cached and non cached analyses
                    cached_ans, no_db_ans = partition(
                        no_db_ans, lambda x: x.uuid in ANALYSIS_CACHE)

                    cached_ans = list(cached_ans)

                    #add analyses from cache to db_ans
                    db_ans.extend(
                        [ANALYSIS_CACHE[ci.uuid] for ci in cached_ans])

                    #increment value in cache_count
                    for ci in cached_ans:
                        self._add_to_cache(ci)

                    #load remaining analyses
                    no_db_ans = list(no_db_ans)
                    n = len(no_db_ans)
                    if n:

                        if self.use_vcs:
                            #clone the necessary project repositories
                            def f(x):
                                try:
                                    return x.labnumber.sample.project.name
                                except AttributeError:
                                    pass

                            prs = filter(lambda x: x is not None,
                                         (f(ai) for ai in no_db_ans))
                            self.vcs.clone_project_repos(prs)

                        if n > 1:
                            if progress is not None:
                                if progress.max < (n + progress.get_value()):
                                    progress.increase_max(n + 2)
                            else:
                                progress = self._open_progress(n + 2)

                        new_ans = []
                        for i, ai in enumerate(no_db_ans):
                            if progress:
                                if progress.canceled:
                                    self.debug('canceling make analyses')
                                    db_ans = []
                                    new_ans = []
                                    break
                                elif progress.accepted:
                                    self.debug(
                                        'accepting {}/{} analyses'.format(
                                            i, n))
                                    break

                            a = self._construct_analysis(ai, progress, **kw)
                            if a:
                                if use_cache:
                                    self._add_to_cache(a)
                                new_ans.append(a)

                                # if progress:
                                #     progress.on_trait_change(self._progress_closed,
                                #                              'closed', remove=True)

                        db_ans.extend(new_ans)

                        # self.debug('use vcs {}'.format(self.use_vcs))
                        # if self.use_vcs:
                        #     if progress:
                        #         progress.increase_max(len(new_ans)+1)
                        #         progress.change_message('Adding analyses to vcs')
                        #
                        #     self.vcs.add_analyses(new_ans, progress=progress)

                        self.debug('use offline database {}'.format(
                            self.use_offline_database))
                        if self.use_offline_database:
                            if progress:
                                progress.increase_max(len(new_ans) + 1)
                                progress.change_message(
                                    'Transferring analyses for offline usage')
                            self.offline_bridge.add_analyses(db,
                                                             new_ans,
                                                             progress=progress)

                if progress:
                    progress.soft_close()

                return db_ans
Example No. 44
    def _easy_func(self, ep, manager):
        db = self.manager.db

        doc = ep.doc('blanks')
        fits = doc['blank_fit_isotopes']
        projects = doc['projects']

        unks = [ai for proj in projects
                for si in db.get_samples(project=proj)
                for ln in si.labnumbers
                for ai in ln.analyses]

        prog = manager.progress
        # prog = self.manager.open_progress(len(ans) + 1)
        #bin analyses
        prog.increase_max(len(unks))

        preceding_fits, non_preceding_fits = map(list, partition(fits, lambda x: x['fit'] == 'preceding'))
        if preceding_fits:
            for ai in unks:
                if prog.canceled:
                    return
                elif prog.accepted:
                    break
                l, a, s = ai.labnumber.identifier, ai.aliquot, ai.step
                prog.change_message('Save preceding blank for {}-{:02n}{}'.format(l, a, s))
                hist = db.add_history(ai, 'blanks')
                ai.selected_histories.selected_blanks = hist
                for fi in preceding_fits:
                    self._preceding_correct(db, fi, ai, hist)

        #make figure root dir
        if doc['save_figures']:
            root = doc['figure_root']
            r_mkdir(root)

        with no_auto_ctx(self.active_editor):
            if non_preceding_fits:
                for ais in bin_analyses(unks):
                    if prog.canceled:
                        return
                    elif prog.accepted:
                        break

                    self.active_editor.set_items(ais, progress=prog)
                    self.active_editor.find_references(progress=prog)

                    #refresh graph
                    invoke_in_main_thread(self.active_editor.rebuild_graph)

                    if not manager.wait_for_user():
                        return

                    #save a figure
                    if doc['save_figures']:
                        title = self.active_editor.make_title()
                        p = os.path.join(root, add_extension(title, '.pdf'))
                        self.active_editor.save_file(p)

                    self.active_editor.save(progress=prog)

                    self.active_editor.dump_tool()
        return True
Example No. 45
    def make_analyses(self,
                      ans,
                      progress=None,
                      use_progress=True,
                      exclude=None,
                      use_cache=True,
                      unpack=False,
                      calculate_age=False,
                      calculate_F=False,
                      load_aux=False,
                      **kw):
        """
            loading the analysis' signals appears to be the most expensive operation.
            the majority of the load time is in _construct_analysis
        """
        if exclude:
            ans = self.filter_analysis_tag(ans, exclude)

        if not ans:
            self.debug('no analyses to load')
            return []

        db = self.db
        with db.session_ctx():
            # partition into DBAnalysis vs IsotopeRecordView
            db_ans, no_db_ans = list(
                map(list, partition(ans, lambda x: isinstance(x, DBAnalysis))))
            self._calculate_cached_ages(db_ans, calculate_age, calculate_F)
            if unpack:
                for di in db_ans:
                    if not di.has_raw_data:
                        no_db_ans.append(di)
                        db_ans.remove(di)

            if load_aux:
                for di in db_ans:
                    if not di.has_changes:
                        if di not in no_db_ans:
                            no_db_ans.append(di)
                        db_ans.remove(di)

            if no_db_ans:
                if use_cache:
                    # partition into cached and non cached analyses
                    cached_ans, no_db_ans = partition(
                        no_db_ans, lambda x: x.uuid in ANALYSIS_CACHE)
                    cached_ans = list(cached_ans)
                    no_db_ans = list(no_db_ans)

                    cns = [ANALYSIS_CACHE[ci.uuid] for ci in cached_ans]

                    # if unpack is true make sure cached analyses have raw data
                    if unpack or load_aux:
                        if unpack:
                            a, b = self._unpack_cached_analyses(
                                cns, calculate_age, calculate_F)
                            db_ans.extend(a)
                            no_db_ans.extend(b)
                        if load_aux:
                            a, b = self._load_aux_cached_analyses(cns)
                            db_ans.extend(a)
                            no_db_ans.extend(b)
                    else:
                        self._calculate_cached_ages(cns, calculate_age,
                                                    calculate_F)
                        # add analyses from cache to db_ans
                        db_ans.extend(cns)

                    # increment value in cache_count
                    self._increment_cache(cached_ans, use_cache)

                # load remaining analyses
                n = len(no_db_ans)
                if n:
                    # self._clone_vcs_repos(no_db_ans)
                    progress = self._setup_progress(n, progress, use_progress)
                    db_ans, new_ans = self._construct_analyses(
                        no_db_ans,
                        db_ans,
                        progress,
                        calculate_age,
                        calculate_F,
                        unpack,
                        use_cache,
                        use_progress,
                        load_aux=load_aux,
                        **kw)
                    db_ans.extend(new_ans)

                    # self.debug('use vcs {}'.format(self.use_vcs))
                    # if self.use_vcs:
                    # if progress:
                    #         progress.increase_max(len(new_ans)+1)
                    #         progress.change_message('Adding analyses to vcs')
                    #
                    #     self.vcs.add_analyses(new_ans, progress=progress)

                    # self.debug('use offline database {}'.format(self.use_offline_database))
                    # if self.use_offline_database:
                    #     if progress:
                    #         progress.increase_max(len(new_ans) + 1)
                    #         progress.change_message('Transferring analyses for offline usage')
                    #     self.offline_bridge.add_analyses(db, new_ans, progress=progress)

            if progress:
                progress.soft_close()

            return db_ans