Example #1
    def __init__(self, uuid, record_id, repository_identifier, *args, **kw):
        super(DVCAnalysis, self).__init__(*args, **kw)
        self.record_id = record_id
        path = analysis_path((uuid, record_id), repository_identifier)
        self.repository_identifier = repository_identifier
        self.rundate = datetime.datetime.now()

        root = os.path.dirname(path)
        bname = os.path.basename(path)
        head, ext = os.path.splitext(bname)

        ep = os.path.join(root, 'extraction', '{}.extr{}'.format(head, ext))
        if os.path.isfile(ep):
            jd = dvc_load(ep)

            self.load_extraction(jd)

        else:
            self.warning('Invalid analysis. RunID="{}". No extraction file {}'.format(record_id, ep))

        if os.path.isfile(path):
            jd = dvc_load(path)
            self.load_spectrometer_parameters(jd.get('spec_sha'))
            self.load_environmentals(jd.get('environmental'))

            self.load_meta(jd)
        else:
            self.warning('Invalid analysis. RunID="{}". No meta file {}'.format(record_id, path))

        self.load_paths()
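
Every example on this page goes through dvc_load and dvc_dump. For readers without the pychron source handy, here is a minimal sketch of what such helpers typically look like; the names match the examples, but the bodies are an assumption (thin JSON wrappers that tolerate a missing file), not the project's actual implementation.

import json
import os


def dvc_load(path):
    # assumed behavior: return the parsed JSON document, or an empty dict when
    # the file is missing (several examples rely on getting a falsy result back)
    if path and os.path.isfile(path):
        with open(path, 'r') as rfile:
            return json.load(rfile)
    return {}


def dvc_dump(obj, path):
    # assumed behavior: write indented, key-sorted JSON so the files diff
    # cleanly under git-based version control
    with open(path, 'w') as wfile:
        json.dump(obj, wfile, indent=4, sort_keys=True)
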
Example #2
    def __init__(self, uuid, record_id, repository_identifier, *args, **kw):
        super(DVCAnalysis, self).__init__(*args, **kw)
        self.record_id = record_id
        path = analysis_path((uuid, record_id), repository_identifier)
        self.repository_identifier = repository_identifier
        self.rundate = datetime.datetime.now()

        root = os.path.dirname(path)
        bname = os.path.basename(path)
        head, ext = os.path.splitext(bname)

        ep = os.path.join(root, 'extraction', '{}.extr{}'.format(head, ext))
        if os.path.isfile(ep):
            jd = dvc_load(ep)

            self.load_extraction(jd)

        else:
            self.warning('Invalid analysis. RunID="{}". No extraction file {}'.format(record_id, ep))

        if os.path.isfile(path):
            jd = dvc_load(path)
            self.load_spectrometer_parameters(jd.get('spec_sha'))
            self.load_environmentals(jd.get('environmental'))

            self.load_meta(jd)
        else:
            self.warning('Invalid analysis. RunID="{}". No meta file {}'.format(record_id, path))

        self.load_paths()
Example #3
    def __init__(self, record_id, repository_identifier, *args, **kw):
        super(DVCAnalysis, self).__init__(*args, **kw)
        self.record_id = record_id
        path = analysis_path(record_id, repository_identifier)
        self.repository_identifier = repository_identifier
        self.rundate = datetime.datetime.now()
        root = os.path.dirname(path)
        bname = os.path.basename(path)
        head, ext = os.path.splitext(bname)

        jd = dvc_load(os.path.join(root, 'extraction', '{}.extr{}'.format(head, ext)))
        for attr in EXTRACTION_ATTRS:
            tag = attr
            if attr == 'cleanup_duration':
                if attr not in jd:
                    tag = 'cleanup'
            elif attr == 'extract_duration':
                if attr not in jd:
                    tag = 'duration'

            v = jd.get(tag)
            if v is not None:
                setattr(self, attr, v)

        pd = jd.get('positions')
        if pd:
            ps = sorted(pd, key=lambda x: x['position'])
            self.position = ','.join([str(pp['position']) for pp in ps])

            self.xyz_position = ';'.join([','.join(map(str, (pp['x'], pp['y'], pp['z'])))
                                          for pp in ps if pp['x'] is not None])

        if not self.extract_units:
            self.extract_units = 'W'

        jd = dvc_load(path)
        for attr in META_ATTRS:
            v = jd.get(attr)
            self.debug('{}={}'.format(attr, v))
            if v is not None:
                setattr(self, attr, v)

        if self.increment is not None:
            self.step = make_step(self.increment)

        ts = jd['timestamp']
        try:
            self.rundate = datetime.datetime.strptime(ts, '%Y-%m-%dT%H:%M:%S')
        except ValueError:
            self.rundate = datetime.datetime.strptime(ts, '%Y-%m-%dT%H:%M:%S.%f')

        self.collection_version = jd['collection_version']
        self._set_isotopes(jd)

        self.timestamp = make_timef(self.rundate)
        self.aliquot_step_str = make_aliquot_step(self.aliquot, self.step)

        self.load_paths()
        self.load_spectrometer_parameters(jd['spec_sha'])
Example #4
    def update_productions(self,
                           irrad,
                           level,
                           production,
                           note=None,
                           add=True):
        p = os.path.join(paths.meta_root, irrad, 'productions.json')

        obj = dvc_load(p)
        obj['note'] = str(note) if note else ''

        if level in obj:
            if obj[level] != production:
                self.debug(
                    'setting production to irrad={}, level={}, prod={}'.format(
                        irrad, level, production))
                obj[level] = production
                dvc_dump(obj, p)

                if add:
                    self.add(p, commit=False)
        else:
            obj[level] = production
            dvc_dump(obj, p)
            if add:
                self.add(p, commit=False)
Example #5
    def load_spectrometer_parameters(self, spec_sha):
        p = os.path.join(paths.repository_dataset_dir, self.repository_identifier, '{}.json'.format(spec_sha))
        sd = dvc_load(p)

        self.source_parameters = sd['spectrometer']
        self.gains = sd['gains']
        self.deflections = sd['deflections']
Example #6
def set_spectrometer_file(dban, root):
    meas = dban.measurement
    gain_history = dban.gain_history
    gains = {}
    if gain_history:
        gains = {
            d.detector.name: d.value
            for d in gain_history.gains if d.value is not None
        }

    # deflections
    deflections = {
        d.detector.name: d.deflection
        for d in meas.deflections if d.deflection is not None
    }

    # source
    src = {
        k: getattr(meas.spectrometer_parameters, k)
        for k in QTEGRA_SOURCE_KEYS
    }

    obj = dict(spectrometer=src, gains=gains, deflections=deflections)
    # hexsha = self.dvc.get_meta_head()
    # obj['commit'] = str(hexsha)
    spec_sha = spectrometer_sha(src, gains, deflections)
    path = os.path.join(root, '{}.json'.format(spec_sha))
    dvc_dump(obj, path)

    # update analysis's spec_sha
    path = analysis_path(dban.record_id, os.path.basename(root))
    obj = dvc_load(path)
    obj['spec_sha'] = spec_sha
    dvc_dump(obj, path)
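
spectrometer_sha itself is not shown in these examples. Conceptually it turns the source, gain, and deflection dictionaries into a content hash, so identical spectrometer configurations land in the same '{sha}.json' file. A minimal sketch under that assumption (the real helper may serialize the dictionaries differently):

import hashlib
import json


def spectrometer_sha(src, gains, deflections):
    # assumption: hash a canonical (sorted-key) JSON serialization of the three
    # dictionaries so that equal configurations always yield the same digest
    sha = hashlib.sha1()
    for d in (src, gains, deflections):
        sha.update(json.dumps(d, sort_keys=True).encode('utf-8'))
    return sha.hexdigest()
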
Example #7
    def repository_db_sync(self, reponame):
        repo = self._get_repository(reponame, as_current=False)
        ps = []
        with self.db.session_ctx():
            ans = self.db.repository_analyses(reponame)
            for ai in ans:
                p = analysis_path(ai.record_id, reponame)
                obj = dvc_load(p)

                sample = None
                project = None
                material = None
                changed = False
                for attr, v in (('sample', sample), ('project', project),
                                ('material', material)):
                    if obj.get(attr) != v:
                        obj[attr] = v
                        changed = True

                if changed:
                    ps.append(p)
                    dvc_dump(obj, p)

            if ps:
                repo.pull()
                repo.add_paths(ps)
                repo.commit('Synced repository with database {}'.format(
                    self.db.datasource_url))
                repo.push()
Example #8
 def func(x, prog, i, n):
     if prog:
         prog.change_message('Making Interpreted age {}'.format(x.name))
     obj = dvc_load(x.path)
     ia = DVCInterpretedAge()
     ia.from_json(obj)
     return ia
Example #9
        def modify_meta(p):
            jd = dvc_load(p)

            jd['aliquot'] = aliquot
            jd['increment'] = alpha_to_int(step)

            dvc_dump(jd, p)
Example #10
 def update_productions(self, irrad, level, production, add=True):
     p = os.path.join(paths.meta_root, irrad, 'productions.json')
     obj = dvc_load(p)
     obj[level] = production
     dvc_dump(obj, p)
     if add:
         self.add(p, commit=False)
Example #11
    def update_flux(self, irradiation, level, pos, identifier, j, e, decay=None, analyses=None, add=True):
        if decay is None:
            decay = {}
        if analyses is None:
            analyses = []

        p = self.get_level_path(irradiation, level)
        jd = dvc_load(p)
        npos = {'position': pos, 'j': j, 'j_err': e,
                'decay_constants': decay,
                'identifier': identifier,
                'analyses': [{'uuid': ai.uuid,
                              'record_id': ai.record_id,
                              'status': ai.is_omitted()}
                             for ai in analyses]}
        if jd:
            added = any((ji['position'] == pos for ji in jd))
            njd = [ji if ji['position'] != pos else npos for ji in jd]
            if not added:
                njd.append(npos)

        else:
            njd = [npos]

        dvc_dump(njd, p)
        if add:
            self.add(p, commit=False)
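
The heart of update_flux is a replace-or-append keyed on 'position': an existing entry for the hole is swapped for the new payload, otherwise the payload is appended. A small self-contained illustration of that pattern with hypothetical data, independent of pychron:

def upsert_position(entries, npos):
    # replace the entry whose 'position' matches, or append if there is none
    if entries:
        exists = any(e['position'] == npos['position'] for e in entries)
        merged = [npos if e['position'] == npos['position'] else e for e in entries]
        if not exists:
            merged.append(npos)
        return merged
    return [npos]


level = [{'position': 1, 'j': 1e-3}, {'position': 2, 'j': 2e-3}]
level = upsert_position(level, {'position': 2, 'j': 2.5e-3, 'j_err': 1e-5})  # replaced
level = upsert_position(level, {'position': 3, 'j': 3.0e-3, 'j_err': 1e-5})  # appended
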
Example #12
def set_spectrometer_file(dban, root):
    meas = dban.measurement
    gain_history = dban.gain_history
    gains = {}
    if gain_history:
        gains = {d.detector.name: d.value for d in gain_history.gains if d.value is not None}

    # deflections
    deflections = {d.detector.name: d.deflection for d in meas.deflections if d.deflection is not None}

    # source
    src = {k: getattr(meas.spectrometer_parameters, k) for k in QTEGRA_SOURCE_KEYS}

    obj = dict(spectrometer=src,
               gains=gains,
               deflections=deflections)
    # hexsha = self.dvc.get_meta_head()
    # obj['commit'] = str(hexsha)
    spec_sha = spectrometer_sha(src, gains, deflections)
    path = os.path.join(root, '{}.json'.format(spec_sha))
    dvc_dump(obj, path)

    # update analysis's spec_sha
    path = analysis_path(dban.record_id, os.path.basename(root))
    obj = dvc_load(path)
    obj['spec_sha'] = spec_sha
    dvc_dump(obj, path)
Example #13
 def update_productions(self, irrad, level, production, add=True):
     p = os.path.join(paths.meta_root, irrad, 'productions.json')
     obj = dvc_load(p)
     obj[level] = production
     dvc_dump(obj, p)
     if add:
         self.add(p, commit=False)
Example #14
def _fix_id(src_id, dest_id, identifier, root, repo, new_aliquot=None):
    sp = analysis_path(src_id, repo, root=root)
    dp = analysis_path(dest_id, repo, root=root, mode='w')
    print(sp, dp)
    if not os.path.isfile(sp):
        print('not a file', sp)
        return

    jd = dvc_load(sp)
    jd['identifier'] = identifier
    if new_aliquot:
        jd['aliquot'] = new_aliquot

    dvc_dump(jd, dp)

    print('{}>>{}'.format(sp, dp))
    for modifier in ('baselines', 'blanks', 'extraction', 'intercepts',
                     'icfactors', 'peakcenter', '.data'):
        sp = analysis_path(src_id, repo, modifier=modifier, root=root)
        dp = analysis_path(dest_id,
                           repo,
                           modifier=modifier,
                           root=root,
                           mode='w')
        print('{}>>{}'.format(sp, dp))
        if sp and os.path.isfile(sp):
            # shutil.copy(sp, dp)
            shutil.move(sp, dp)
Example #15
    def get_default_productions(self):
        p = os.path.join(paths.meta_root, 'reactors.json')
        if not os.path.isfile(p):
            with open(p, 'w') as wfile:
                from pychron.file_defaults import REACTORS_DEFAULT
                wfile.write(REACTORS_DEFAULT)

        return dvc_load(p)
Example #16
 def _get_level_positions(self, irrad, level):
     p = self.get_level_path(irrad, level)
     obj = dvc_load(p)
     if isinstance(obj, list):
         positions = obj
     else:
         positions = obj.get('positions', [])
     return positions
Example #17
File: meta_repo.py Project: NMGRL/pychron
 def _get_level_positions(self, irrad, level):
     p = self.get_level_path(irrad, level)
     obj = dvc_load(p)
     if isinstance(obj, list):
         positions = obj
     else:
         positions = obj.get('positions', [])
     return positions
Example #18
File: meta_repo.py Project: NMGRL/pychron
    def get_default_productions(self):
        p = os.path.join(paths.meta_root, 'reactors.json')
        if not os.path.isfile(p):
            with open(p, 'w') as wfile:
                from pychron.file_defaults import REACTORS_DEFAULT
                wfile.write(REACTORS_DEFAULT)

        return dvc_load(p)
Example #19
    def get_production(self, irrad, level, **kw):
        path = os.path.join(paths.meta_root, irrad, 'productions.json')
        obj = dvc_load(path)
        pname = obj[level]
        p = os.path.join(paths.meta_root, irrad, 'productions', add_extension(pname, ext='.json'))

        ip = Production(p)
        return pname, ip
Example #20
    def get_production(self, irrad, level, **kw):
        path = os.path.join(paths.meta_root, irrad, 'productions.json')
        obj = dvc_load(path)
        pname = obj[level]
        p = os.path.join(paths.meta_root, irrad, 'productions',
                         add_extension(pname, ext='.json'))

        ip = Production(p)
        return pname, ip
Example #21
def get_frozen_flux(repo, irradiation):
    path = repository_path(repo, '{}.json'.format(irradiation))

    fd = {}
    if path:
        fd = dvc_load(path)
        for fi in fd.values():
            fi['j'] = ufloat(*fi['j'], tag='J')
    return fd
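
ufloat comes from the uncertainties package; the frozen-flux file stores each 'j' as a [value, error] pair that is rehydrated into an error-propagating number. A short illustration with a hypothetical file layout:

from uncertainties import ufloat

# hypothetical frozen-flux content: identifier -> {'j': [value, error]}
fd = {'66000': {'j': [4.0e-3, 2.0e-6]},
      '66001': {'j': [4.1e-3, 2.1e-6]}}

for fi in fd.values():
    fi['j'] = ufloat(*fi['j'], tag='J')

total = fd['66000']['j'] + fd['66001']['j']   # errors propagate automatically
print(total.nominal_value, total.std_dev)
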
Example #22
File: meta_repo.py Project: NMGRL/pychron
def get_frozen_flux(repo, irradiation):
    path = repository_path(repo, '{}.json'.format(irradiation))

    fd = {}
    if path:
        fd = dvc_load(path)
        for fi in fd.values():
            fi['j'] = ufloat(*fi['j'], tag='J')
    return fd
Example #23
    def set_identifier(self, irradiation, level, pos, identifier):
        p = self.get_level_path(irradiation, level)
        jd = dvc_load(p)

        # select the entry that matches this position
        d = next((pi for pi in jd if pi['position'] == pos), None)
        if d:
            d['identifier'] = identifier

        dvc_dump(jd, p)
        self.add(p, commit=False)
Example #24
File: meta_repo.py Project: NMGRL/pychron
    def get_production(self, irrad, level, **kw):
        path = os.path.join(paths.meta_root, irrad, 'productions.json')
        obj = dvc_load(path)

        pname = obj.get(level, '')
        p = os.path.join(paths.meta_root, irrad, 'productions', add_extension(pname, ext='.json'))

        ip = Production(p)
        # print 'new production id={}, name={}, irrad={}, level={}'.format(id(ip), pname, irrad, level)
        return pname, ip
Example #25
    def load_paths(self, modifiers=None):
        if modifiers is None:
            modifiers = ('intercepts', 'baselines', 'blanks', 'icfactors', 'tags')

        for modifier in modifiers:
            path = self._analysis_path(modifier=modifier)
            if path and os.path.isfile(path):
                jd = dvc_load(path)
                func = getattr(self, '_load_{}'.format(modifier))
                func(jd)
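
load_paths uses name-based dispatch: each modifier string selects both a file and a _load_<modifier> method via getattr. The pattern is easy to demonstrate outside pychron with hypothetical handlers:

class Loader:
    def _load_intercepts(self, jd):
        print('intercepts:', jd)

    def _load_blanks(self, jd):
        print('blanks:', jd)

    def load(self, modifier, jd):
        # build the handler name from the modifier string; a missing handler
        # raises AttributeError, which a caller may choose to guard against
        func = getattr(self, '_load_{}'.format(modifier))
        func(jd)


Loader().load('intercepts', {'Ar40': 123.4})
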
Example #26
    def find_interpreted_ages(self, identifiers, repositories):
        ias = []
        for idn in identifiers:
            path = find_interpreted_age_path(idn, repositories)
            if path:
                obj = dvc_load(path)
                name = obj.get('name')
                ias.append(InterpretedAgeRecordView(idn, path, name))

        return ias
Example #27
    def set_identifier(self, irradiation, level, pos, identifier):
        p = self.get_level_path(irradiation, level)
        jd = dvc_load(p)

        # select the entry that matches this position
        d = next((pi for pi in jd if pi['position'] == pos), None)
        if d:
            d['identifier'] = identifier

        dvc_dump(jd, p)
        self.add(p, commit=False)
Example #28
def fix_meta(dest, repo_identifier, root):
    d = os.path.join(root, repo_identifier)
    changed = False
    with dest.session_ctx():
        repo = dest.get_repository(repo_identifier)
        for ra in repo.repository_associations:
            an = ra.analysis
            lchanged = False  # track whether this particular analysis file changes
            p = analysis_path(an.record_id, repo_identifier)
            obj = dvc_load(p)
            if not obj:
                print('********************** {} not found in repo'.format(an.record_id))
                continue

            print(an.record_id, p)
            if not obj['irradiation']:
                obj['irradiation'] = an.irradiation
                lchanged = True
                changed = True
            if not obj['irradiation_position']:
                obj['irradiation_position'] = an.irradiation_position_position
                lchanged = True
                changed = True
            if not obj['irradiation_level']:
                obj['irradiation_level'] = an.irradiation_level
                lchanged = True
                changed = True
            if not obj['material']:
                obj['material'] = an.irradiation_position.sample.material.name
                lchanged = True
                changed = True
            if not obj['project']:
                obj['project'] = an.irradiation_position.sample.project.name
                lchanged = True
                changed = True

            if obj['repository_identifier'] != an.repository_identifier:
                obj['repository_identifier'] = an.repository_identifier
                lchanged = True
                changed = True

            if lchanged:
                print('{} changed'.format(an.record_id))
                dvc_dump(obj, p)

    if changed:
        from pychron.git_archive.repo_manager import GitRepoManager
        rm = GitRepoManager()
        rm.open_repo(d)

        repo = rm._repo
        repo.git.add('.')
        repo.git.commit('-m', '<MANUAL> fixed metadata')
        repo.git.push()
Example #29
    def get_production(self, irrad, level, **kw):
        path = os.path.join(paths.meta_root, irrad, 'productions.json')
        obj = dvc_load(path)

        pname = obj.get(level, '')
        p = os.path.join(paths.meta_root, irrad, 'productions',
                         add_extension(pname, ext='.json'))

        ip = Production(p)
        # print 'new production id={}, name={}, irrad={}, level={}'.format(id(ip), pname, irrad, level)
        return pname, ip
Example #30
def fix_meta(dest, repo_identifier, root):
    d = os.path.join(root, repo_identifier)
    changed = False
    with dest.session_ctx():
        repo = dest.get_repository(repo_identifier)
        for ra in repo.repository_associations:
            an = ra.analysis
            lchanged = False  # track whether this particular analysis file changes
            p = analysis_path(an.record_id, repo_identifier)
            obj = dvc_load(p)
            if not obj:
                print('********************** {} not found in repo'.format(an.record_id))
                continue

            print(an.record_id, p)
            if not obj['irradiation']:
                obj['irradiation'] = an.irradiation
                lchanged = True
                changed = True
            if not obj['irradiation_position']:
                obj['irradiation_position'] = an.irradiation_position_position
                lchanged = True
                changed = True
            if not obj['irradiation_level']:
                obj['irradiation_level'] = an.irradiation_level
                lchanged = True
                changed = True
            if not obj['material']:
                obj['material'] = an.irradiation_position.sample.material.name
                lchanged = True
                changed = True
            if not obj['project']:
                obj['project'] = an.irradiation_position.sample.project.name
                lchanged = True
                changed = True

            if obj['repository_identifier'] != an.repository_identifier:
                obj['repository_identifier'] = an.repository_identifier
                lchanged = True
                changed = True

            if lchanged:
                print('{} changed'.format(an.record_id))
                dvc_dump(obj, p)

    if changed:
        from pychron.git_archive.repo_manager import GitRepoManager
        rm = GitRepoManager()
        rm.open_repo(d)

        repo = rm._repo
        repo.git.add('.')
        repo.git.commit('-m', '<MANUAL> fixed metadata')
        repo.git.push()
Example #31
def fix_iso_list(runid, repository, root):
    path = analysis_path(runid, repository, root=root)
    # print('asdf', path)
    obj = dvc_load(path)
    isotopes = obj['isotopes']
    try:
        v = isotopes.pop('PHHCbs')
        v['name'] = 'Ar39'
        isotopes['Ar39'] = v
        obj['isotopes'] = isotopes
        dvc_dump(obj, path)
    except KeyError:
        return
Example #32
def fix_iso_list(runid, repository, root):
    path = analysis_path(runid, repository, root=root)
    # print('asdf', path)
    obj = dvc_load(path)
    isotopes = obj['isotopes']
    try:
        v = isotopes.pop('PHHCbs')
        v['name'] = 'Ar39'
        isotopes['Ar39'] = v
        obj['isotopes'] = isotopes
        dvc_dump(obj, path)
    except KeyError:
        return
Example #33
    def load_paths(self, modifiers=None):
        if modifiers is None:
            modifiers = ('intercepts', 'baselines', 'blanks', 'icfactors', 'tags', 'peakcenter')

        for modifier in modifiers:
            path = self._analysis_path(modifier=modifier)
            if path and os.path.isfile(path):
                jd = dvc_load(path)
                func = getattr(self, '_load_{}'.format(modifier))
                try:
                    func(jd)
                except BaseException as e:
                    self.warning('Failed loading {}. error={}'.format(modifier, e))
Example #34
    def _set_isotopes(self, jd):
        isos = jd.get('isotopes')
        if not isos:
            return

        isos = {k: Isotope(k, v['detector']) for k, v in isos.items()}
        self.isotopes = isos

        # set mass
        path = os.path.join(paths.meta_root, 'molecular_weights.json')
        masses = dvc_load(path)
        for k, v in isos.items():
            v.mass = masses.get(k, 0)
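
The mass lookup at the end of _set_isotopes assumes molecular_weights.json maps isotope names to atomic masses. A hypothetical file content and the equivalent lookup:

# hypothetical molecular_weights.json content
masses = {'Ar40': 39.962, 'Ar39': 38.964, 'Ar36': 35.968}

isotopes = {'Ar40': {'detector': 'H1'}, 'Ar37': {'detector': 'L2'}}
for name, iso in isotopes.items():
    # unknown isotopes fall back to 0, mirroring masses.get(k, 0) above
    iso['mass'] = masses.get(name, 0)
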
Example #35
def fix_a_steps(dest, repo_identifier, root):
    with dest.session_ctx():
        repo = dest.get_repository(repo_identifier)

        ans = [
            (ra.analysis.irradiation_position.identifier, ra.analysis.aliquot,
             ra.analysis.increment, ra.analysis.record_id, ra.analysis.id)
            for ra in repo.repository_associations
        ]
        key = lambda x: x[0]
        ans = sorted(ans, key=key)
        for identifier, ais in groupby(ans, key=key):
            try:
                int(identifier)
            except ValueError:
                continue

            # groupby aliquot
            key = lambda xi: xi[1]
            for aliquot, ais in groupby(ais, key=key):
                ais = sorted(ais, key=lambda ai: ai[2])
                print(identifier, aliquot, ais)
                # if the first increment for a given aliquot is 1
                # and the increment for the first analysis of the aliquot is None
                if len(ais) == 1:
                    continue

                if ais[0][2] is None and ais[1][2] == 1:
                    an = dest.get_analysis(ais[0][4])
                    print('fix', ais[0], an, an.record_id)
                    original_record_id = str(an.record_id)
                    path = analysis_path(an.record_id, repo_identifier)
                    obj = dvc_load(path)
                    obj['increment'] = 0

                    an.increment = 0
                    npath = analysis_path(an.record_id, repo_identifier)
                    dvc_dump(obj, npath)
                    os.remove(path)

                    for modifier in ('baselines', 'blanks', 'extraction',
                                     'intercepts', 'icfactors', 'peakcenter',
                                     '.data'):
                        npath = analysis_path(an.record_id,
                                              repo_identifier,
                                              modifier=modifier)
                        opath = analysis_path(original_record_id,
                                              repo_identifier,
                                              modifier=modifier)
                        # print opath, npath
                        os.rename(opath, npath)
Example #36
    def update_level_z(self, irradiation, level, z):
        p = self.get_level_path(irradiation, level)
        obj = dvc_load(p)

        try:
            add = obj['z'] != z
            obj['z'] = z
        except TypeError:
            obj = {'z': z, 'positions': obj}
            add = True

        dvc_dump(obj, p)
        if add:
            self.add(p, commit=False)
Example #37
File: meta_repo.py Project: NMGRL/pychron
    def update_level_z(self, irradiation, level, z):
        p = self.get_level_path(irradiation, level)
        obj = dvc_load(p)

        try:
            add = obj['z'] != z
            obj['z'] = z
        except TypeError:
            obj = {'z': z, 'positions': obj}
            add = True

        dvc_dump(obj, p)
        if add:
            self.add(p, commit=False)
Example #38
def fix_run(runid, repository, root, modifier):
    path = analysis_path(runid, repository, root=root, modifier=modifier)
    # print('asdf', path)
    obj = dvc_load(path)
    # print('ff', obj)
    try:
        v = obj.pop('PHHCbs')
        obj['Ar39'] = v
        dvc_dump(obj, path)
        msg = 'fixed'
    except KeyError:
        msg = 'skipped'

    print(runid, msg)
Example #39
def fix_run(runid, repository, root, modifier):
    path = analysis_path(runid, repository, root=root, modifier=modifier)
    # print('asdf', path)
    obj = dvc_load(path)
    # print('ff', obj)
    try:
        v = obj.pop('PHHCbs')
        obj['Ar39'] = v
        dvc_dump(obj, path)
        msg = 'fixed'
    except KeyError:
        msg = 'skipped'

    print(runid, msg)
Example #40
    def remove_irradiation_position(self, irradiation, level, hole):
        p = self.get_level_path(irradiation, level)
        jd = dvc_load(p)
        if jd:
            if isinstance(jd, list):
                positions = jd
                z = 0
            else:
                positions = jd['positions']
                z = jd['z']

            npositions = [ji for ji in positions if not ji['position'] == hole]
            obj = {'z': z, 'positions': npositions}
            dvc_dump(obj, p)
            self.add(p, commit=False)
Example #41
        def ai_gen():
            key = lambda x: x.irradiation
            lkey = lambda x: x.level
            rkey = lambda x: x.repository_identifier

            for irrad, ais in groupby(sorted(ans, key=key), key=key):
                for level, ais in groupby(sorted(ais, key=lkey), key=lkey):
                    p = self.get_level_path(irrad, level)
                    obj = dvc_load(p)
                    for repo, ais in groupby(sorted(ais, key=rkey), key=rkey):
                        yield repo, irrad, level, {
                            ai.irradiation_position:
                            obj[ai.irradiation_position]
                            for ai in ais
                        }
Example #42
File: meta_repo.py Project: NMGRL/pychron
    def remove_irradiation_position(self, irradiation, level, hole):
        p = self.get_level_path(irradiation, level)
        jd = dvc_load(p)
        if jd:
            if isinstance(jd, list):
                positions = jd
                z = 0
            else:
                positions = jd['positions']
                z = jd['z']

            npositions = [ji for ji in positions if not ji['position'] == hole]
            obj = {'z': z, 'positions': npositions}
            dvc_dump(obj, p)
            self.add(p, commit=False)
Example #43
File: meta_repo.py Project: NMGRL/pychron
    def get_sensitivities(self):
        specs = {}
        root = os.path.join(paths.meta_root, 'spectrometers')
        for p in list_directory(root):
            if p.endswith('.sens.json'):
                name = p.split('.')[0]
                p = os.path.join(root, p)
                obj = dvc_load(p)

                specs[name] = obj
                for r in obj:
                    if r['create_date']:
                        r['create_date'] = datetime.strptime(r['create_date'], DATE_FORMAT)

        return specs
Example #44
    def load_raw_data(self, keys=None, n_only=False):
        def format_blob(blob):
            return base64.b64decode(blob)

        path = self._analysis_path(modifier='.data')
        isotopes = self.isotopes

        jd = dvc_load(path)
        signals = jd['signals']
        baselines = jd['baselines']
        sniffs = jd['sniffs']

        for sd in signals:
            isok = sd['isotope']
            if keys and isok not in keys:
                continue
            try:
                iso = isotopes[isok]
            except KeyError:
                continue

            iso.unpack_data(format_blob(sd['blob']), n_only)

            det = sd['detector']
            bd = next((b for b in baselines if b['detector'] == det), None)
            if bd:
                iso.baseline.unpack_data(format_blob(bd['blob']), n_only)

        # loop through the requested keys to make sure none were missed; this can happen when only loading baselines
        if keys:
            for k in keys:
                bd = next((b for b in baselines if b['detector'] == k), None)
                if bd:
                    for iso in isotopes.values():
                        if iso.detector == k:
                            iso.baseline.unpack_data(format_blob(bd['blob']),
                                                     n_only)

        for sn in sniffs:
            isok = sn['isotope']
            if keys and isok not in keys:
                continue

            try:
                iso = isotopes[isok]
            except KeyError:
                continue
            iso.sniff.unpack_data(format_blob(sn['blob']), n_only)
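
format_blob simply reverses a base64 encoding; the decoded bytes are then handed to unpack_data, whose binary layout is not shown here. The sketch below only demonstrates the base64 round trip, using an assumed packing of (time, intensity) double pairs:

import base64
import struct

# assumed layout: consecutive (time, intensity) pairs packed as big-endian doubles
pairs = [(0.0, 10.1), (1.0, 10.3), (2.0, 10.2)]
raw = b''.join(struct.pack('>dd', t, v) for t, v in pairs)
blob = base64.b64encode(raw)

decoded = base64.b64decode(blob)                # what format_blob() returns
n = len(decoded) // struct.calcsize('>dd')
unpacked = [struct.unpack_from('>dd', decoded, i * 16) for i in range(n)]
assert list(map(tuple, unpacked)) == pairs
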
Example #45
    def get_sensitivities(self):
        specs = {}
        root = os.path.join(paths.meta_root, 'spectrometers')
        for p in list_directory(root):
            if p.endswith('.sens.json'):
                name = p.split('.')[0]
                p = os.path.join(root, p)
                obj = dvc_load(p)

                specs[name] = obj
                for r in obj:
                    if r['create_date']:
                        r['create_date'] = datetime.strptime(
                            r['create_date'], DATE_FORMAT)

        return specs
Example #46
    def load_raw_data(self, keys=None, n_only=False):
        def format_blob(blob):
            return base64.b64decode(blob)

        path = self._analysis_path(modifier='.data')
        isotopes = self.isotopes

        jd = dvc_load(path)
        signals = jd['signals']
        baselines = jd['baselines']
        sniffs = jd['sniffs']

        for sd in signals:
            isok = sd['isotope']
            if keys and isok not in keys:
                continue
            try:
                iso = isotopes[isok]
            except KeyError:
                continue

            iso.unpack_data(format_blob(sd['blob']), n_only)

            det = sd['detector']
            bd = next((b for b in baselines if b['detector'] == det), None)
            if bd:
                iso.baseline.unpack_data(format_blob(bd['blob']), n_only)

        # loop through the requested keys to make sure none were missed; this can happen when only loading baselines
        if keys:
            for k in keys:
                bd = next((b for b in baselines if b['detector'] == k), None)
                if bd:
                    for iso in isotopes.values():
                        if iso.detector == k:
                            iso.baseline.unpack_data(format_blob(bd['blob']), n_only)

        for sn in sniffs:
            isok = sn['isotope']
            if keys and isok not in keys:
                continue

            try:
                iso = isotopes[isok]
            except KeyError:
                continue
            iso.sniff.unpack_data(format_blob(sn['blob']), n_only)
Example #47
    def update_flux(self,
                    irradiation,
                    level,
                    pos,
                    identifier,
                    j,
                    e,
                    decay=None,
                    analyses=None,
                    add=True):
        if decay is None:
            decay = {}
        if analyses is None:
            analyses = []

        p = self.get_level_path(irradiation, level)
        jd = dvc_load(p)
        npos = {
            'position': pos,
            'j': j,
            'j_err': e,
            'decay_constants': decay,
            'identifier': identifier,
            'analyses': [{'uuid': ai.uuid,
                          'record_id': ai.record_id,
                          'status': ai.is_omitted()}
                         for ai in analyses]
        }
        if jd:
            added = any((ji['position'] == pos for ji in jd))
            njd = [ji if ji['position'] != pos else npos for ji in jd]
            if not added:
                njd.append(npos)

        else:
            njd = [npos]

        dvc_dump(njd, p)
        if add:
            self.add(p, commit=False)
Example #48
        def func(x, prog, i, n):
            repo, irrad, level, d = x
            if prog:
                prog.change_message('Freezing Flux {}{} Repository={}'.format(
                    irrad, level, repo))

            root = os.path.join(paths.repository_dataset_dir, repo, 'flux',
                                irrad)
            r_mkdir(root)

            p = os.path.join(root, level)
            if os.path.isfile(p):
                # merge the new positions into any previously frozen ones so the
                # loaded dictionary is not discarded unchanged
                dd = dvc_load(p)
                dd.update(d)
                d = dd

            dvc_dump(d, p)
            added.append((repo, p))
Example #49
File: meta_repo.py Project: NMGRL/pychron
    def add_position(self, irradiation, level, pos, add=True):
        p = self.get_level_path(irradiation, level)
        jd = dvc_load(p)
        if isinstance(jd, list):
            positions = jd
            z = 0
        else:
            positions = jd.get('positions', [])
            z = jd.get('z', 0)

        pd = next((p for p in positions if p['position'] == pos), None)
        if pd is None:
            positions.append({'position': pos, 'decay_constants': {}})

        dvc_dump({'z': z, 'positions': positions}, p)
        if add:
            self.add(p, commit=False)
Example #50
File: meta_repo.py Project: NMGRL/pychron
    def update_fluxes(self, irradiation, level, j, e, add=True):
        p = self.get_level_path(irradiation, level)
        jd = dvc_load(p)

        if isinstance(jd, list):
            positions = jd
        else:
            positions = jd.get('positions')

        if positions:
            for ip in positions:
                ip['j'] = j
                ip['j_err'] = e

            dvc_dump(jd, p)
            if add:
                self.add(p, commit=False)
Example #51
def fix_a_steps(dest, repo_identifier, root):
    with dest.session_ctx():
        repo = dest.get_repository(repo_identifier)

        ans = [(ra.analysis.irradiation_position.identifier, ra.analysis.aliquot, ra.analysis.increment,
                ra.analysis.record_id, ra.analysis.id)
               for ra in repo.repository_associations]
        key = lambda x: x[0]
        ans = sorted(ans, key=key)
        for identifier, ais in groupby(ans, key=key):
            try:
                int(identifier)
            except ValueError:
                continue

            # groupby aliquot
            key = lambda xi: xi[1]
            for aliquot, ais in groupby(ais, key=key):
                ais = sorted(ais, key=lambda ai: ai[2])
                print(identifier, aliquot, ais)
                # if the first increment for a given aliquot is 1
                # and the increment for the first analysis of the aliquot is None
                if len(ais) == 1:
                    continue

                if ais[0][2] is None and ais[1][2] == 1:
                    an = dest.get_analysis(ais[0][4])
                    print('fix', ais[0], an, an.record_id)
                    original_record_id = str(an.record_id)
                    path = analysis_path(an.record_id, repo_identifier)
                    obj = dvc_load(path)
                    obj['increment'] = 0

                    an.increment = 0
                    npath = analysis_path(an.record_id, repo_identifier)
                    dvc_dump(obj, npath)
                    os.remove(path)

                    for modifier in ('baselines', 'blanks', 'extraction',
                                     'intercepts', 'icfactors', 'peakcenter', '.data'):
                        npath = analysis_path(an.record_id, repo_identifier, modifier=modifier)
                        opath = analysis_path(original_record_id, repo_identifier, modifier=modifier)
                        # print opath, npath
                        os.rename(opath, npath)
Example #52
File: meta_repo.py Project: NMGRL/pychron
    def update_productions(self, irrad, level, production, note=None, add=True):
        p = os.path.join(paths.meta_root, irrad, 'productions.json')

        obj = dvc_load(p)
        obj['note'] = str(note) if note else ''

        if level in obj:
            if obj[level] != production:
                self.debug('setting production to irrad={}, level={}, prod={}'.format(irrad, level, production))
                obj[level] = production
                dvc_dump(obj, p)

                if add:
                    self.add(p, commit=False)
        else:
            obj[level] = production
            dvc_dump(obj, p)
            if add:
                self.add(p, commit=False)
Example #53
File: meta_repo.py Project: NMGRL/pychron
    def update_flux(self, irradiation, level, pos, identifier, j, e, mj, me, decay=None,
                    position_jerr=None,
                    analyses=None, options=None, add=True):

        if options is None:
            options = {}

        if decay is None:
            decay = {}
        if analyses is None:
            analyses = []

        p = self.get_level_path(irradiation, level)
        jd = dvc_load(p)
        if isinstance(jd, list):
            positions = jd
            z = 0
        else:
            positions = jd.get('positions', [])
            z = jd.get('z', 0)

        npos = {'position': pos, 'j': j, 'j_err': e,
                'mean_j': mj, 'mean_j_err': me,
                'position_jerr': position_jerr,
                'decay_constants': decay,
                'identifier': identifier,
                'options': options,
                'analyses': [{'uuid': ai.uuid,
                              'record_id': ai.record_id,
                              'status': ai.is_omitted()}
                             for ai in analyses]}
        if positions:
            added = any((ji['position'] == pos for ji in positions))
            npositions = [ji if ji['position'] != pos else npos for ji in positions]
            if not added:
                npositions.append(npos)
        else:
            npositions = [npos]

        obj = {'z': z, 'positions': npositions}
        dvc_dump(obj, p)
        if add:
            self.add(p, commit=False)
Example #54
    def _make_flux_file(self, repo, irrad, unks):
        path = os.path.join(paths.repository_dataset_dir, repo, '{}.json'.format(irrad))

        # read in existing flux file

        obj = {}
        if os.path.isfile(path):
            obj = dvc_load(path)

        added = []
        for unk in unks:
            identifier = unk.identifier
            if identifier not in added:
                f = {'j': self.recaller.get_flux(identifier)}

                obj[identifier] = f
                added.append(identifier)

        dvc_dump(obj, path)
        self._paths.append(path)
Example #55
    def add_position(self, irradiation, level, pos, add=True):
        p = self.get_level_path(irradiation, level)
        jd = dvc_load(p)

        pd = next((p for p in jd if p['position'] == pos), None)
        if pd is None:
            jd.append({'position': pos, 'decay_constants': {}})
        # for pd in jd:
        #     if pd['position'] == pos:

        # njd = [ji if ji['position'] != pos else {'position': pos, 'j': j, 'j_err': e,
        #                                          'decay_constants': decay,
        #                                          'identifier': identifier,
        #                                          'analyses': [{'uuid': ai.uuid,
        #                                                        'record_id': ai.record_id,
        #                                                        'status': ai.is_omitted()}
        #                                                       for ai in analyses]} for ji in jd]

        dvc_dump(jd, p)
        if add:
            self.add(p, commit=False)
Example #56
File: meta_repo.py Project: NMGRL/pychron
    def get_cocktail_irradiation(self):
        """
        example cocktail.json

        {
            "chronology": "2016-06-01 17:00:00",
            "j": 4e-4,
            "j_err": 4e-9
        }

        :return:
        """
        p = os.path.join(paths.meta_root, 'cocktail.json')
        ret = dvc_load(p)
        nret = {}
        if ret:
            lines = ['1.0, {}, {}'.format(ret['chronology'], ret['chronology'])]
            c = Chronology.from_lines(lines)
            nret['chronology'] = c
            nret['flux'] = ufloat(ret['j'], ret['j_err'])

        return nret
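
For the cocktail entry, the single chronology line encodes a power of 1.0 with identical start and end times, and j/j_err are wrapped into one error-carrying value. A short illustration that uses only the JSON shown in the docstring above (ufloat is from the uncertainties package):

from uncertainties import ufloat

# the cocktail.json payload from the docstring above
ret = {'chronology': '2016-06-01 17:00:00', 'j': 4e-4, 'j_err': 4e-9}

# one 'power, start, end' line; start == end as in get_cocktail_irradiation
line = '1.0, {}, {}'.format(ret['chronology'], ret['chronology'])
flux = ufloat(ret['j'], ret['j_err'])
print(line)   # 1.0, 2016-06-01 17:00:00, 2016-06-01 17:00:00
print(flux)   # nominal 4e-4 with a 4e-9 uncertainty
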