Example #1
    def add_repository_association(self, expid, runspec):
        db = self.db
        with db.session_ctx():
            dban = db.get_analysis_uuid(runspec.uuid)
            if dban:
                for e in dban.repository_associations:
                    if e.repository == expid:
                        break
                else:
                    db.add_repository_association(expid, dban)

                src_expid = runspec.repository_identifier
                if src_expid != expid:
                    repo = self._get_repository(expid)

                    for m in PATH_MODIFIERS:
                        src = analysis_path(runspec.record_id,
                                            src_expid,
                                            modifier=m)
                        dest = analysis_path(runspec.record_id,
                                             expid,
                                             modifier=m,
                                             mode='w')

                        shutil.copyfile(src, dest)
                        repo.add(dest, commit=False)
                    repo.commit('added repository association')
            else:
                self.warning('{} not in the database {}'.format(
                    runspec.runid, self.db.name))
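Every example in this listing is a call site of the analysis_path helper. For orientation, here is a minimal sketch of the signature those call sites imply; the body and directory layout are assumptions for illustration only, not the actual pychron implementation:

import os


def analysis_path(record_id, repository_identifier, modifier=None,
                  extension='.json', mode='r'):
    # Sketch only. Assumed layout:
    #   <repo_root>/<repository_identifier>/<modifier>/<record_id><extension>
    # The real helper may shard directories and locate repo_root differently.
    repo_root = os.path.join(os.path.expanduser('~'), 'repositories')
    d = os.path.join(repo_root, repository_identifier, modifier or '')
    if mode == 'w' and not os.path.isdir(d):
        # call sites that pass mode='w' expect the destination to be writable
        os.makedirs(d)
    return os.path.join(d, '{}{}'.format(record_id, extension))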
Example #2
def fix_a_steps(dest, repo_identifier, root):
    with dest.session_ctx():
        repo = dest.get_repository(repo_identifier)

        ans = [
            (ra.analysis.irradiation_position.identifier, ra.analysis.aliquot,
             ra.analysis.increment, ra.analysis.record_id, ra.analysis.id)
            for ra in repo.repository_associations
        ]
        key = lambda x: x[0]
        ans = sorted(ans, key=key)
        for identifier, ais in groupby(ans, key=key):
            try:
                int(identifier)
            except ValueError:
                continue

            # groupby aliquot
            key = lambda xi: xi[1]
            for aliquot, ais in groupby(ais, key=key):
                ais = sorted(ais, key=lambda ai: ai[2])
                print identifier, aliquot, ais
                # if the first analysis of an aliquot has no increment (None)
                # and the next analysis has increment 1, the aliquot is missing
                # its "A" step, so relabel the first analysis as increment 0
                if len(ais) == 1:
                    continue

                if ais[0][2] is None and ais[1][2] == 1:
                    an = dest.get_analysis(ais[0][4])
                    print 'fix', ais[0], an, an.record_id
                    original_record_id = str(an.record_id)
                    path = analysis_path(an.record_id, repo_identifier)
                    obj = dvc_load(path)
                    obj['increment'] = 0

                    an.increment = 0
                    npath = analysis_path(an.record_id, repo_identifier)
                    dvc_dump(obj, npath)
                    os.remove(path)

                    for modifier in ('baselines', 'blanks', 'extraction',
                                     'intercepts', 'icfactors', 'peakcenter',
                                     '.data'):
                        npath = analysis_path(an.record_id,
                                              repo_identifier,
                                              modifier=modifier)
                        opath = analysis_path(original_record_id,
                                              repo_identifier,
                                              modifier=modifier)
                        # print opath, npath
                        os.rename(opath, npath)
Example #3
    def repository_db_sync(self, reponame):
        repo = self._get_repository(reponame, as_current=False)
        ps = []
        with self.db.session_ctx():
            ans = self.db.repository_analyses(reponame)
            for ai in ans:
                p = analysis_path(ai.record_id, reponame)
                obj = dvc_load(p)

                # current metadata according to the database record
                sample = ai.irradiation_position.sample.name
                project = ai.irradiation_position.sample.project.name
                material = ai.irradiation_position.sample.material.name
                changed = False
                for attr, v in (('sample', sample), ('project', project),
                                ('material', material)):
                    if obj.get(attr) != v:
                        obj[attr] = v
                        changed = True

                if changed:
                    ps.append(p)
                    dvc_dump(obj, p)

            if ps:
                repo.pull()
                repo.add_paths(ps)
                repo.commit('Synced repository with database {}'.format(
                    self.db.datasource_url))
                repo.push()
Example #4
def set_spectrometer_file(dban, root):
    meas = dban.measurement
    gain_history = dban.gain_history
    gains = {}
    if gain_history:
        gains = {d.detector.name: d.value for d in gain_history.gains if d.value is not None}

    # deflections
    deflections = {d.detector.name: d.deflection for d in meas.deflections if d.deflection is not None}

    # source
    src = {k: getattr(meas.spectrometer_parameters, k) for k in QTEGRA_SOURCE_KEYS}

    obj = dict(spectrometer=src,
               gains=gains,
               deflections=deflections)
    # hexsha = self.dvc.get_meta_head()
    # obj['commit'] = str(hexsha)
    spec_sha = spectrometer_sha(src, gains, deflections)
    path = os.path.join(root, '{}.json'.format(spec_sha))
    dvc_dump(obj, path)

    # update analysis's spec_sha
    path = analysis_path(dban.record_id, os.path.basename(root))
    obj = dvc_load(path)
    obj['spec_sha'] = spec_sha
    dvc_dump(obj, path)
Example #5
def set_spectrometer_file(dban, root):
    meas = dban.measurement
    gain_history = dban.gain_history
    gains = {}
    if gain_history:
        gains = {
            d.detector.name: d.value
            for d in gain_history.gains if d.value is not None
        }

    # deflections
    deflections = {
        d.detector.name: d.deflection
        for d in meas.deflections if d.deflection is not None
    }

    # source
    src = {
        k: getattr(meas.spectrometer_parameters, k)
        for k in QTEGRA_SOURCE_KEYS
    }

    obj = dict(spectrometer=src, gains=gains, deflections=deflections)
    # hexsha = self.dvc.get_meta_head()
    # obj['commit'] = str(hexsha)
    spec_sha = spectrometer_sha(src, gains, deflections)
    path = os.path.join(root, '{}.json'.format(spec_sha))
    dvc_dump(obj, path)

    # update analysis's spec_sha
    path = analysis_path(dban.record_id, os.path.basename(root))
    obj = dvc_load(path)
    obj['spec_sha'] = spec_sha
    dvc_dump(obj, path)
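In both versions above the spectrometer file is content addressed: spectrometer_sha digests the source, gain, and deflection dictionaries, the digest names the JSON file, and each analysis stores the same digest as spec_sha. A minimal stand-in for such a digest follows; the actual hashing scheme is an assumption, not taken from pychron:

import hashlib
import json


def spectrometer_sha(src, gains, deflections):
    # Assumed scheme: a stable digest over the three dictionaries, so that
    # identical spectrometer configurations resolve to the same file name.
    sha = hashlib.sha1()
    for d in (src, gains, deflections):
        sha.update(json.dumps(d, sort_keys=True))
    return sha.hexdigest()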
Example #6
    def dump(self):
        obj = {'name': self.name, 'omit_dict': self.omit_dict}
        if not self.path:
            self.path = analysis_path(self.record_id, self.experiment_identifier, modifier='tag', mode='w')

        # with open(self.path, 'w') as wfile:
        #     json.dump(obj, wfile, indent=4)
        jdump(obj, self.path)
Example #7
 def _make_path(self, modifier=None, extension='.json'):
     runid = self.per_spec.run_spec.runid
     repository_identifier = self.per_spec.run_spec.repository_identifier
     return analysis_path(runid,
                          repository_identifier,
                          modifier,
                          extension,
                          mode='w')
Example #8
    def from_analysis(cls, an):
        tag = cls()
        tag.omit_dict = {k: getattr(an, k) for k in OMIT_KEYS}
        tag.name = an.tag
        tag.record_id = an.record_id
        tag.experiment_identifier = an.experiment_identifier
        tag.path = analysis_path(an.record_id, an.experiment_identifier, modifier='tag')

        return tag
Example #9
def fix_a_steps(dest, repo_identifier, root):
    with dest.session_ctx():
        repo = dest.get_repository(repo_identifier)

        ans = [(ra.analysis.irradiation_position.identifier, ra.analysis.aliquot, ra.analysis.increment,
                ra.analysis.record_id, ra.analysis.id)
               for ra in repo.repository_associations]
        key = lambda x: x[0]
        ans = sorted(ans, key=key)
        for identifier, ais in groupby(ans, key=key):
            try:
                int(identifier)
            except ValueError:
                continue

            # groupby aliquot
            key = lambda xi: xi[1]
            for aliquot, ais in groupby(ais, key=key):
                ais = sorted(ais, key=lambda ai: ai[2])
                print identifier, aliquot, ais
                # if the first analysis of an aliquot has no increment (None)
                # and the next analysis has increment 1, the aliquot is missing
                # its "A" step, so relabel the first analysis as increment 0
                if len(ais) == 1:
                    continue

                if ais[0][2] is None and ais[1][2] == 1:
                    an = dest.get_analysis(ais[0][4])
                    print 'fix', ais[0], an, an.record_id
                    original_record_id = str(an.record_id)
                    path = analysis_path(an.record_id, repo_identifier)
                    obj = dvc_load(path)
                    obj['increment'] = 0

                    an.increment = 0
                    npath = analysis_path(an.record_id, repo_identifier)
                    dvc_dump(obj, npath)
                    os.remove(path)

                    for modifier in ('baselines', 'blanks', 'extraction',
                                     'intercepts', 'icfactors', 'peakcenter', '.data'):
                        npath = analysis_path(an.record_id, repo_identifier, modifier=modifier)
                        opath = analysis_path(original_record_id, repo_identifier, modifier=modifier)
                        # print opath, npath
                        os.rename(opath, npath)
Example #10
    def from_analysis(cls, an):
        tag = cls()
        tag.name = an.tag
        tag.record_id = an.record_id
        tag.repository_identifier = an.repository_identifier
        tag.path = analysis_path(an.record_id,
                                 an.repository_identifier,
                                 modifier='tags')

        return tag
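from_analysis above and dump (Example #12 below) form a round trip: the tag state and target path are captured from an analysis, then written back through analysis_path. A hedged usage sketch, assuming Tag is the enclosing class and `an` is any analysis exposing the attributes read above:

# hypothetical usage; `an` stands in for an analysis with tag, record_id and
# repository_identifier attributes
tag = Tag.from_analysis(an)
tag.name = 'omit'  # re-label the analysis
tag.dump()         # writes tag.path (computed via analysis_path if unset)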
Example #11
        def func(x, prog, i, n):
            pr, ai = x
            if prog:
                prog.change_message('Freezing Production {}'.format(ai.runid))

            p = analysis_path(ai.runid,
                              ai.repository_identifier,
                              'productions',
                              mode='w')
            pr.dump(path=p)
            added.append((ai.repository_identifier, p))
Example #12
    def dump(self):
        obj = {'name': self.name}
        if not self.path:
            self.path = analysis_path(self.record_id,
                                      self.repository_identifier,
                                      modifier='tags',
                                      mode='w')

        # with open(self.path, 'w') as wfile:
        #     json.dump(obj, wfile, indent=4)
        dvc_dump(obj, self.path)
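dvc_dump and dvc_load appear throughout this listing as thin JSON helpers; the commented-out json.dump(obj, wfile, indent=4) above shows what dvc_dump replaced. A minimal stand-in consistent with that, where returning an empty dict for a missing file is an assumption based on the `if not obj:` check in fix_meta below:

import json
import os


def dvc_dump(obj, path):
    # same effect as the commented-out json.dump above
    with open(path, 'w') as wfile:
        json.dump(obj, wfile, indent=4)


def dvc_load(path):
    # assumption: missing files yield an empty dict so callers can test
    # `if not obj:`
    obj = {}
    if os.path.isfile(path):
        with open(path, 'r') as rfile:
            obj = json.load(rfile)
    return obj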
Example #13
def fix_meta(dest, repo_identifier, root):
    d = os.path.join(root, repo_identifier)
    changed = False
    with dest.session_ctx():
        repo = dest.get_repository(repo_identifier)
        for ra in repo.repository_associations:
            an = ra.analysis
            lchanged = False
            p = analysis_path(an.record_id, repo_identifier)
            obj = dvc_load(p)
            if not obj:
                print '********************** {} not found in repo'.format(
                    an.record_id)
                continue

            print an.record_id, p
            if not obj['irradiation']:
                obj['irradiation'] = an.irradiation
                lchanged = True
                changed = True
            if not obj['irradiation_position']:
                obj['irradiation_position'] = an.irradiation_position_position
                lchanged = True
                changed = True
            if not obj['irradiation_level']:
                obj['irradiation_level'] = an.irradiation_level
                lchanged = True
                changed = True
            if not obj['material']:
                obj['material'] = an.irradiation_position.sample.material.name
                lchanged = True
                changed = True
            if not obj['project']:
                obj['project'] = an.irradiation_position.sample.project.name
                lchanged = True
                changed = True

            if obj['repository_identifier'] != an.repository_identifier:
                obj['repository_identifier'] = an.repository_identifier
                lchanged = True
                changed = True

            if lchanged:
                print '{} changed'.format(an.record_id)
                dvc_dump(obj, p)

    if changed:
        from pychron.git_archive.repo_manager import GitRepoManager
        rm = GitRepoManager()
        rm.open_repo(d)

        repo = rm._repo
        repo.git.add('.')
        repo.git.commit('-m', '<MANUAL> fixed metadata')
        repo.git.push()
Example #14
    def add_experiment_association(self, expid, runspec):
        db = self.db
        with db.session_ctx():
            dban = db.get_analysis_uuid(runspec.uuid)
            for e in dban.experiment_associations:
                if e.experimentName == expid:
                    break
            else:
                db.add_experiment_association(expid, dban)

            src_expid = runspec.experiment_identifier
            if src_expid != expid:
                repo = self._get_experiment_repo(expid)

                for m in PATH_MODIFIERS:
                    src = analysis_path(runspec.record_id, src_expid, modifier=m)
                    dest = analysis_path(runspec.record_id, expid, modifier=m, mode='w')

                    shutil.copyfile(src, dest)
                    repo.add(dest, commit=False)
                repo.commit('added experiment association')
Example #15
def fix_meta(dest, repo_identifier, root):
    d = os.path.join(root, repo_identifier)
    changed = False
    with dest.session_ctx():
        repo = dest.get_repository(repo_identifier)
        for ra in repo.repository_associations:
            an = ra.analysis
            lchanged = False
            p = analysis_path(an.record_id, repo_identifier)
            obj = dvc_load(p)
            if not obj:
                print '********************** {} not found in repo'.format(an.record_id)
                continue

            print an.record_id, p
            if not obj['irradiation']:
                obj['irradiation'] = an.irradiation
                lchanged = True
                changed = True
            if not obj['irradiation_position']:
                obj['irradiation_position'] = an.irradiation_position_position
                lchanged = True
                changed = True
            if not obj['irradiation_level']:
                obj['irradiation_level'] = an.irradiation_level
                lchanged = True
                changed = True
            if not obj['material']:
                obj['material'] = an.irradiation_position.sample.material.name
                lchanged = True
                changed = True
            if not obj['project']:
                obj['project'] = an.irradiation_position.sample.project.name
                lchanged = True
                changed = True

            if obj['repository_identifier'] != an.repository_identifier:
                obj['repository_identifier'] = an.repository_identifier
                lchanged = True
                changed = True

            if lchanged:
                print '{} changed'.format(an.record_id)
                dvc_dump(obj, p)

    if changed:
        from pychron.git_archive.repo_manager import GitRepoManager
        rm = GitRepoManager()
        rm.open_repo(d)

        repo = rm._repo
        repo.git.add('.')
        repo.git.commit('-m', '<MANUAL> fixed metadata')
        repo.git.push()
Example #16
    def _transfer_analysis_to(self, dest, src, rid):
        p = analysis_path(rid, src)
        np = analysis_path(rid, dest)

        obj = dvc_load(p)
        obj['repository_identifier'] = dest
        dvc_dump(obj, p)

        ops = [p]
        nps = [np]

        shutil.move(p, np)

        for modifier in ('baselines', 'blanks', 'extraction', 'intercepts',
                         'icfactors', 'peakcenter', '.data'):
            p = analysis_path(rid, src, modifier=modifier)
            np = analysis_path(rid, dest, modifier=modifier)
            shutil.move(p, np)
            ops.append(p)
            nps.append(np)

        return ops, nps
Example #17
    def revert_manual_edits(self, runid, repository_identifier):
        ps = []
        for mod in ('intercepts', 'blanks', 'baselines', 'icfactors'):
            path = analysis_path(runid, repository_identifier, modifier=mod)
            with open(path, 'r') as rfile:
                obj = json.load(rfile)
                for item in obj.itervalues():
                    if isinstance(item, dict):
                        item['use_manual_value'] = False
                        item['use_manual_error'] = False
            ps.append(path)
            dvc_dump(obj, path)

        msg = '<MANUAL> reverted to non manually edited'
        self.commit_manual_edits(repository_identifier, ps, msg)
Example #18
    def update_analyses(self, ans, modifier, msg):
        key = lambda x: x.experiment_identifier
        ans = sorted(ans, key=key)
        mod_experiments = []
        for expid, ais in groupby(ans, key=key):
            paths = map(lambda x: analysis_path(x.record_id, x.experiment_identifier, modifier=modifier), ais)
            # print expid, modifier, paths
            if self.experiment_add_paths(expid, paths):
                self.experiment_commit(expid, msg)
                mod_experiments.append(expid)

        # ais = map(analysis_path, ais)
        #     if self.experiment_add_analyses(exp, ais):
        #         self.experiment_commit(exp, msg)
        #         mod_experiments.append(exp)
        return mod_experiments
Example #19
    def manual_edit(self, runid, repository_identifier, values, errors,
                    modifier):
        self.debug('manual edit {} {} {}'.format(runid, repository_identifier,
                                                 modifier))
        self.debug('values {}'.format(values))
        self.debug('errors {}'.format(errors))
        path = analysis_path(runid, repository_identifier, modifier=modifier)
        with open(path, 'r') as rfile:
            obj = json.load(rfile)
            for k, v in values.iteritems():
                o = obj[k]
                o['manual_value'] = v
                o['use_manual_value'] = True
            for k, v in errors.iteritems():
                o = obj[k]
                o['manual_error'] = v
                o['use_manual_error'] = True

        dvc_dump(obj, path)
        return path
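manual_edit above and revert_manual_edits (Example #17) read and rewrite per-key dictionaries inside the intercepts/blanks/baselines/icfactors files. An illustrative entry consistent with the keys they touch; the isotope key and the remaining fields are hypothetical:

# hypothetical intercepts entry; only the manual_value/use_manual_value and
# manual_error/use_manual_error keys come from the code above
obj = {
    'Ar40': {
        'value': 545.2,
        'error': 0.12,
        'manual_value': 545.0,
        'use_manual_value': True,
        'manual_error': 0.1,
        'use_manual_error': False,
    },
}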
Example #20
def get_review_status(record):
    ms = 0
    for m in ("blanks", "intercepts", "icfactors"):
        p = analysis_path(record.record_id, record.repository_identifier, modifier=m)
        date = ""
        with open(p, "r") as rfile:
            obj = json.load(rfile)
            reviewed = obj.get("reviewed", False)
            if reviewed:
                dt = datetime.fromtimestamp(os.path.getmtime(p))
                date = dt.strftime("%m/%d/%Y")
                ms += 1

        setattr(record, "{}_review_status".format(m), (reviewed, date))

    ret = "Intermediate"  # intermediate
    if not ms:
        ret = "Default"  # default
    elif ms == 3:
        ret = "All"  # all

    record.review_status = ret
Example #21
def get_review_status(record):
    ms = 0
    for m in ('blanks', 'intercepts', 'icfactors'):
        p = analysis_path(record.record_id,
                          record.repository_identifier,
                          modifier=m)
        date = ''
        with open(p, 'r') as rfile:
            obj = json.load(rfile)
            reviewed = obj.get('reviewed', False)
            if reviewed:
                dt = datetime.fromtimestamp(os.path.getmtime(p))
                date = dt.strftime('%m/%d/%Y')
                ms += 1

        setattr(record, '{}_review_status'.format(m), (reviewed, date))

    ret = 'Intermediate'  # intermediate
    if not ms:
        ret = 'Default'  # default
    elif ms == 3:
        ret = 'All'  # all

    record.review_status = ret
Example #22
 def _make_path(self, modifier=None, extension='.json'):
     runid = self.per_spec.run_spec.runid
     experiment_id = self.per_spec.run_spec.experiment_identifier
     return analysis_path(runid, experiment_id, modifier, extension, mode='w')
Example #23
 def _make_path(self, modifier=None, extension='.json'):
     runid = self.per_spec.run_spec.runid
     repository_identifier = self.per_spec.run_spec.repository_identifier
     return analysis_path(runid, repository_identifier, modifier, extension, mode='w')
Example #24
 def _add_interpreted_age(self, ia, d):
     p = analysis_path(ia.identifier,
                       ia.repository_identifier,
                       modifier='ia',
                       mode='w')
     dvc_dump(d, p)
Example #25
    def _do_diff_fired(self):
        if self.selected_commits:
            # n = len(self.selected_commits)
            # lhs = self.selected_lhs
            # if n == 1:
            #     rhsid = 'HEAD'
            #     obj = self.repo.head.commit
            #     rhsdate = isoformat_date(obj.committed_date)
            # if lhs.tag == 'IMPORT':
            #     diffs = []
            #     for a in ('blanks', 'icfactors', 'tags', 'intercepts'):
            #         p = analysis_path(self.record_id, self.repository_identifier, modifier=a)
            #         dd = get_diff(self.repo, lhs.hexsha, 'HEAD', p)
            #         if dd:
            #             diffs.append((a, dd))
            #     if diffs:
            #         v = ImportDiffView(self.record_id, lhsid, rhsid, lhsdate, rhsdate)
            #         for a, (aa, bb) in diffs:
            #             func = getattr(v, 'set_{}'.format(a))
            #             func(json.load(aa.data_stream),
            #                  json.load(bb.data_stream))
            #         v.finish()
            #         open_view(v)
            #
            #     return
            # else:
            #     d = get_diff(self.repo, lhs.hexsha, 'HEAD', lhs.path)
            #
            # elif n == 2:
            #     lhs = self.selected_lhs
            #     rhs = self.selected_rhs
            # else:
            #     warning(None, 'Can only diff max of 2')
            #     return

            lhs = self.selected_lhs
            rhs = self.selected_rhs

            lhsid = lhs.hexsha[:8]
            lhsdate = isoformat_date(lhs.date)

            rhsid = rhs.hexsha[:8]
            rhsdate = rhs.date.isoformat()

            diffs = []
            for a in ('blanks', 'icfactors', 'tags', 'intercepts'):
                p = analysis_path(self.record_id, self.repository_identifier, modifier=a)
                dd = get_diff(self.repo, lhs.hexsha, rhs.hexsha, p)
                if dd:
                    diffs.append((a, dd))

            if diffs:
                v = DiffView(self.record_id, lhsid, rhsid, lhsdate, rhsdate)
                for a, (aa, bb) in diffs:
                    func = getattr(v, 'set_{}'.format(a))
                    func(json.load(aa.data_stream),
                         json.load(bb.data_stream))
                v.finish()
                open_view(v)
            else:
                information(None, 'No Differences between {} and {}'.format(lhsid, rhsid))
Example #26
    def _do_diff_fired(self):
        if self.selected_commits:
            # n = len(self.selected_commits)
            # lhs = self.selected_lhs
            # if n == 1:
            #     rhsid = 'HEAD'
            #     obj = self.repo.head.commit
            #     rhsdate = isoformat_date(obj.committed_date)
            # if lhs.tag == 'IMPORT':
            #     diffs = []
            #     for a in ('blanks', 'icfactors', 'tags', 'intercepts'):
            #         p = analysis_path(self.record_id, self.repository_identifier, modifier=a)
            #         dd = get_diff(self.repo, lhs.hexsha, 'HEAD', p)
            #         if dd:
            #             diffs.append((a, dd))
            #     if diffs:
            #         v = ImportDiffView(self.record_id, lhsid, rhsid, lhsdate, rhsdate)
            #         for a, (aa, bb) in diffs:
            #             func = getattr(v, 'set_{}'.format(a))
            #             func(json.load(aa.data_stream),
            #                  json.load(bb.data_stream))
            #         v.finish()
            #         open_view(v)
            #
            #     return
            # else:
            #     d = get_diff(self.repo, lhs.hexsha, 'HEAD', lhs.path)
            #
            # elif n == 2:
            #     lhs = self.selected_lhs
            #     rhs = self.selected_rhs
            # else:
            #     warning(None, 'Can only diff max of 2')
            #     return

            lhs = self.selected_lhs
            rhs = self.selected_rhs

            lhsid = lhs.hexsha[:8]
            lhsdate = isoformat_date(lhs.date)

            rhsid = rhs.hexsha[:8]
            rhsdate = rhs.date.isoformat()

            diffs = []
            for a in ('blanks', 'icfactors', 'tags', 'intercepts'):
                p = analysis_path(self.record_id,
                                  self.repository_identifier,
                                  modifier=a)
                dd = get_diff(self.repo, lhs.hexsha, rhs.hexsha, p)
                if dd:
                    diffs.append((a, dd))

            if diffs:
                v = DiffView(self.record_id, lhsid, rhsid, lhsdate, rhsdate)
                for a, (aa, bb) in diffs:
                    func = getattr(v, 'set_{}'.format(a))
                    func(json.load(aa.data_stream), json.load(bb.data_stream))
                v.finish()
                open_view(v)
            else:
                information(
                    None,
                    'No Differences between {} and {}'.format(lhsid, rhsid))
Example #27
 def _add_interpreted_age(self, ia, d):
     p = analysis_path('{}.ia'.format(ia.identifier), ia.experiment_identifier)
     jdump(d, p)
Example #28
 def _get_frozen_production(self, rid, repo):
     path = analysis_path(rid, repo, 'productions')
     if path:
         return Production(path)