Example #1
    def _get_selected_hexsha(self, commits, lc, rc, view_klass=None, auto_select=True,
                             tags=None, **kw):
        if view_klass is None:
            view_klass = CommitView

        lha = lc.hexsha[:7] if lc else ''
        rha = rc.hexsha[:7] if rc else ''
        ld = get_datetime(float(lc.committed_date)).strftime('%m-%d-%Y') if lc else ''

        rd = get_datetime(float(rc.committed_date)).strftime('%m-%d-%Y') if rc else ''

        n = len(commits)
        h = UpdateGitHistory(n=n, branchname=self.branch,
                             local_commit='{} ({})'.format(ld, lha),
                             head_hexsha=lc.hexsha if lc else '',
                             latest_remote_commit='{} ({})'.format(rd, rha),
                             **kw)

        repo = self._repo
        commits = [repo.commit(i) for i in commits]
        h.set_items(commits, auto_select=auto_select)
        if tags:
            h.set_tags(tags)

        cv = view_klass(model=h)
        info = cv.edit_traits()
        if info.result:
            if h.selected:
                return h.selected.hexsha
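
Every example on this page leans on pychron's get_datetime helper. As a rough mental model only (an assumption, not pychron's actual implementation), it behaves like a thin wrapper around datetime.fromtimestamp that falls back to the current time when called with no argument, as the pre_extraction_save examples below do:

# Hypothetical stand-in for pychron's get_datetime, for illustration only.
from datetime import datetime

def get_datetime(timestamp=None):
    if timestamp is None:
        # pre_extraction_save calls get_datetime() with no argument
        return datetime.now()
    # the commit/launch-history examples pass float epoch seconds
    return datetime.fromtimestamp(float(timestamp))

print(get_datetime(0.0).strftime('%m-%d-%Y'))  # epoch start, local time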
Example #2
    def find_project_overlaps(self, projects):
        tol_hrs = 6
        src = self.processor.db

        pdict = {p: [] for p in projects}
        for p in projects:
            for ms in ('jan', 'obama'):
                ts, idxs = self._get_project_timestamps(p, ms, tol_hrs=tol_hrs)
                for i, ais in enumerate(array_split(ts, idxs + 1)):
                    if not ais.shape[0]:
                        self.debug('skipping {}'.format(i))
                        continue

                    low = get_datetime(ais[0]) - timedelta(hours=tol_hrs / 2.)
                    high = get_datetime(ais[-1]) + timedelta(hours=tol_hrs / 2.)

                    print('========{}, {}, {}'.format(ms, low, high))
                    with src.session_ctx():
                        runs = src.get_analyses_date_range(low, high,
                                                           projects=('REFERENCES',),
                                                           mass_spectrometers=(ms,))

                        pdict[p].extend(ai.record_id for ai in runs)

        for p in projects:
            for o in projects:
                if p == o:
                    continue
                pruns = pdict[p]
                oruns = pdict[o]
                for ai in pruns:
                    if ai in oruns:
                        print(p, o, ai)
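
The ts/idxs pattern above recurs throughout these examples: _get_project_timestamps apparently returns a sorted array of run timestamps plus the indices where consecutive runs are more than tol_hrs apart, so array_split(ts, idxs + 1) yields one chunk per contiguous block of runs. A self-contained sketch of that binning on synthetic data (the gap computation is an assumption about what the helper does):

import numpy as np
from numpy import array_split

# six runs at hours 0, 1, 2, 100, 101, 200, expressed as epoch seconds
ts = np.array([0, 1, 2, 100, 101, 200], dtype=float) * 3600
tol_hrs = 6
idxs = np.where(np.diff(ts) > tol_hrs * 3600)[0]  # index of the last run before each gap
for chunk in array_split(ts, idxs + 1):
    print(chunk / 3600)  # -> [0. 1. 2.], [100. 101.], [200.]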
Example #3
    def bulk_import_project(self, project, principal_investigator, source_name=None, dry=True):
        src = self.processor.db
        tol_hrs = 6
        self.debug('bulk import project={}, pi={}'.format(project, principal_investigator))
        oruns = []

        repository_identifier = project

        # def filterfunc(x):
        #     a = x.labnumber.irradiation_position is None
        #     b = False
        #     if not a:
        #         b = x.labnumber.irradiation_position.level.irradiation.name == irradname
        #
        #     d = False
        #     if x.extraction:
        #         ed = x.extraction.extraction_device
        #         if not ed:
        #             d = True
        #         else:
        #             d = ed.name == 'Fusions CO2'
        #
        #     return (a or b) and d

        if source_name is None:
            source_name = project

        for ms in ('jan', 'obama', 'felix'):
            ts, idxs = self._get_project_timestamps(source_name, ms, tol_hrs=tol_hrs)
            for i, ais in enumerate(array_split(ts, idxs + 1)):
                if not ais.shape[0]:
                    self.debug('skipping {}'.format(i))
                    continue

                low = get_datetime(ais[0]) - timedelta(hours=tol_hrs / 2.)
                high = get_datetime(ais[-1]) + timedelta(hours=tol_hrs / 2.)

                print('========{}, {}, {}'.format(ms, low, high))
                with src.session_ctx():
                    runs = src.get_analyses_date_range(low, high,
                                                       # labnumber=(63630, 63632, 63634, 63636, 63638, 63646, 63648),
                                                       projects=('REFERENCES', ),
                                                       # projects=('REFERENCES', source_name),
                                                       mass_spectrometers=(ms,))

                    if dry:
                        for ai in runs:
                            oruns.append(ai.record_id)
                            print(ai.measurement.mass_spectrometer.name, ai.record_id, ai.labnumber.sample.name,
                                  ai.analysis_timestamp)
                    else:
                        self.debug('================= Do Export i: {} low: {} high: {}'.format(i, low, high))
                        self.debug('N runs: {}'.format(len(runs)))
                        self.do_export([ai.record_id for ai in runs], repository_identifier, principal_investigator)

        return oruns
Example #4
    def bulk_import_project(self, project, principal_investigator, dry=True):
        src = self.processor.db
        tol_hrs = 6
        self.debug('bulk import project={}, pi={}'.format(project, principal_investigator))
        oruns = []

        repository_identifier = project

        # def filterfunc(x):
        #     a = x.labnumber.irradiation_position is None
        #     b = False
        #     if not a:
        #         b = x.labnumber.irradiation_position.level.irradiation.name == irradname
        #
        #     d = False
        #     if x.extraction:
        #         ed = x.extraction.extraction_device
        #         if not ed:
        #             d = True
        #         else:
        #             d = ed.name == 'Fusions CO2'
        #
        #     return (a or b) and d
        #
        for ms in ('jan', 'obama'):
            ts, idxs = self._get_project_timestamps(project, ms, tol_hrs=tol_hrs)
            for i, ais in enumerate(array_split(ts, idxs + 1)):
                if not ais.shape[0]:
                    self.debug('skipping {}'.format(i))
                    continue

                low = get_datetime(ais[0]) - timedelta(hours=tol_hrs / 2.)
                high = get_datetime(ais[-1]) + timedelta(hours=tol_hrs / 2.)

                print('========{}, {}, {}'.format(ms, low, high))
                with src.session_ctx():
                    runs = src.get_analyses_date_range(low, high,
                                                       projects=('REFERENCES', project),
                                                       mass_spectrometers=(ms,))

                    if dry:
                        for ai in runs:
                            oruns.append(ai.record_id)
                            print(ai.measurement.mass_spectrometer.name, ai.record_id, ai.labnumber.sample.name,
                                  ai.analysis_timestamp)
                    else:
                        self.debug('================= Do Export i: {} low: {} high: {}'.format(i, low, high))
                        self.debug('N runs: {}'.format(len(runs)))
                        self.do_export([ai.record_id for ai in runs], repository_identifier, principal_investigator)

        return oruns
Example #5
    def _get_find_parameters(self):
        f = FindAssociatedParametersDialog()

        ais = self.active_editor.analyses
        if ais:
            unks = ais
        elif self.analysis_table.selected:
            ans = self.analysis_table.selected
            unks = ans[:]
        elif self.selected_samples:
            ans = self.analysis_table.analyses
            unks = ans[:]
        elif self.selected_projects:
            with self.manager.db.session_ctx():
                ans = self._get_projects_analyzed_analyses(self.selected_projects)
                unks = [IsotopeRecordView(ai) for ai in ans]
        else:
            self.information_dialog('Select a list of projects, samples or analyses')
            return

        ts = [get_datetime(ai.timestamp) for ai in unks]
        lpost, hpost = min(ts), max(ts)
        f.model.nominal_lpost_date = lpost.date()
        f.model.nominal_hpost_date = hpost.date()

        f.model.nominal_lpost_time = lpost.time()
        f.model.nominal_hpost_time = hpost.time()

        ms = list(set([ai.mass_spectrometer for ai in unks]))
        f.model.available_mass_spectrometers = ms
        f.model.mass_spectrometers = ms

        info = f.edit_traits(kind='livemodal')
        if info.result:
            return f.model
Example #6
    def find_associated_analyses(self, found=None, use_cache=True, progress=None):

        if self.active_editor:
            unks = self.active_editor.analyses

            key = lambda x: x.labnumber
            unks = sorted(unks, key=key)

            db = self.manager.db
            with db.session_ctx():
                tans = []
                if found is None:
                    uuids = []
                else:
                    uuids = found

                ngroups = len(list(groupby(unks, key=key)))
                if progress is None:
                    progress = self.manager.open_progress(ngroups + 1)
                else:
                    progress.increase_max(ngroups + 1)

                for ln, ais in groupby(unks, key=key):
                    msg = 'find associated analyses for labnumber {}'.format(ln)
                    self.debug(msg)
                    progress.change_message(msg)

                    ais = list(ais)
                    ts = [get_datetime(ai.timestamp) for ai in ais]
                    ref = ais[0]
                    ms = ref.mass_spectrometer
                    ed = ref.extract_device
                    self.debug("{} {}".format(ms, ed))
                    for atype in ('blank_unknown', 'blank_air', 'blank_cocktail',
                                  'air', 'cocktail'):
                        for i in range(10):
                            td = timedelta(hours=6 * (i + 1))
                            lpost, hpost = min(ts) - td, max(ts) + td

                            ans = db.get_date_range_analyses(lpost, hpost,
                                                             atype=atype,
                                                             spectrometer=ms)

                            if ans:
                                self.debug('{} {} to {}. nanalyses={}'.format(atype, lpost, hpost, len(ans)))
                                ans = [ai for ai in ans if ai.uuid not in uuids]
                                self.debug('new ans {}'.format(len(ans)))
                                if ans:
                                    tans.extend(ans)
                                    uuids.extend([ai.uuid for ai in ans])
                                break

                progress.soft_close()

                self.active_editor.set_items(tans, is_append=True,
                                             use_cache=use_cache, progress=progress)
                return uuids
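
The inner loop above is an expanding-window search: widen the date range in 6-hour steps until the query returns candidates, then drop any already-found uuids. The bare pattern, with find standing in for the database query (illustrative sketch, not pychron API):

from datetime import datetime, timedelta

def expanding_search(ts, find, step_hrs=6, max_steps=10):
    # widen the window around [min(ts), max(ts)] until find() returns results
    for i in range(max_steps):
        td = timedelta(hours=step_hrs * (i + 1))
        ans = find(min(ts) - td, max(ts) + td)
        if ans:
            return ans
    return []

print(expanding_search([datetime(2015, 1, 1)], lambda lo, hi: []))  # -> []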
Example #7
 def pre_extraction_save(self):
     """
     set runtime and rundate
     """
     d = get_datetime()
     self.runtime = d.time()
     self.rundate = d.date()
     self.info('Analysis started at {}'.format(self.runtime))
     self._pre_extraction_save_hook()
Example #8
        def factory(line):
            line = line.strip()
            if line:
                # each record is "epoch_seconds,total_launches,path"
                lt, t, p = line.split(',')
                n = os.path.basename(p)
                n, _ = os.path.splitext(n)

                lt = get_datetime(float(lt)).strftime('%a %H:%M %m-%d-%Y')

                return LaunchItem(name=n, path=p, last_run_time=lt, total_launches=int(t))
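
Judging from the unpacking, each line of the launches file is "epoch_seconds,total_launches,path". A hypothetical record (LaunchItem comes from the surrounding module):

# hypothetical input line, for illustration only
item = factory('1514764800.0,12,/Users/me/scripts/bakeout.py\n')
# item.name == 'bakeout', item.total_launches == 12,
# item.last_run_time like 'Mon 00:00 01-01-2018' (local-time dependent)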
Example #9
def seasonal_subseries(x, y, **kw):
    import numpy as np
    from pychron.core.helpers.datetime_tools import get_datetime

    # one list per hour of the day
    ybins = [[] for _ in range(24)]
    xbins = [[] for _ in range(24)]
    m = 3600 * 24. / len(x)
    for xi, yi in zip(x, y):
        i = get_datetime(xi).hour
        ybins[i - 1].append(yi)
        xbins[i - 1].append((i - 1) * 3600 + (len(xbins[i - 1])) * m)
    # hours with no samples produce nan means
    ms = [np.mean(b) for b in ybins]

    return xbins, ybins, ms
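
A hypothetical call with one day of synthetic ten-minute samples, assuming get_datetime maps epoch seconds to local datetimes; each of the 24 bins collects the y values for one hour of the day and ms holds the per-hour means (note the i - 1 indexing, which wraps hour 0 into the last bin):

import numpy as np

x = np.arange(0, 24 * 3600, 600, dtype=float)  # one day, every 10 minutes
y = np.sin(2 * np.pi * x / (24 * 3600))
xbins, ybins, ms = seasonal_subseries(x, y)
print(len(ybins), len(ybins[0]))  # -> 24 6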
Example #10
def get_project_bins(project):
    # src = self.processor.db
    src = IsotopeDatabaseManager(dict(host='localhost',
                                      username=os.environ.get('LOCALHOST_DB_USER'),
                                      password=os.environ.get('LOCALHOST_DB_PWD'),
                                      kind='mysql',
                                      # echo=True,
                                      name='pychrondata'))
    tol_hrs = 6
    # self.debug('bulk import project={}, pi={}'.format(project, principal_investigator))
    ts, idxs = get_project_timestamps(src.db, project, tol_hrs=tol_hrs)

    # repository_identifier = project
    # def filterfunc(x):
    #     a = x.labnumber.irradiation_position is None
    #     b = False
    #     if not a:
    #         b = x.labnumber.irradiation_position.level.irradiation.name == irradname
    #
    #     d = False
    #     if x.extraction:
    #         ed = x.extraction.extraction_device
    #         if not ed:
    #             d = True
    #         else:
    #             d = ed.name == 'Fusions CO2'
    #
    #     return (a or b) and d
    #
    for ms in ('jan', 'obama'):
        for i, ais in enumerate(array_split(ts, idxs + 1)):
            if not ais.shape[0]:
                continue

            low = get_datetime(ais[0]) - timedelta(hours=tol_hrs / 2.)
            high = get_datetime(ais[-1]) + timedelta(hours=tol_hrs / 2.)

            print(ms, low, high)
Example #11
def seasonal_subseries(x, y, **kw):
    from numpy import mean

    from pychron.core.helpers.datetime_tools import get_datetime

    # one list per hour of the day
    ybins = [[] for _ in range(24)]
    xbins = [[] for _ in range(24)]
    m = 3600 * 24. / len(x)
    for xi, yi in zip(x, y):
        i = get_datetime(xi).hour
        ybins[i - 1].append(yi)
        xbins[i - 1].append((i - 1) * 3600 + (len(xbins[i - 1])) * m)
    # hours with no samples produce nan means
    ms = [mean(b) for b in ybins]

    return xbins, ybins, ms
Example #12
File: logviewer.py Project: NMGRL/pychron
    def _factory(self, ii):
        li = LogItem()
        li.name = 'foo'
        json_level = str(ii.get('log_level'))
        if 'debug' in json_level:
            li.level = 'DEBUG'
        elif 'info' in json_level:
            li.level = 'WARNING'
        else:
            li.level = 'INFO'

        li.timestamp = get_datetime(ii.get('log_time'))

        fmt = ii.get('log_format')
        li.message = str(fmt.format(**{k: tostr(v) for k, v in ii.items()}))

        return li
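
A hypothetical record, assuming entries come from a structured log where log_time is epoch seconds and log_format is a str.format template over the record's own fields. Note the mapping as written: 'debug' -> DEBUG, 'info' -> WARNING, anything else -> INFO:

# hypothetical log record, for illustration only
rec = {'log_level': 'info',
       'log_time': 1514764800.0,
       'log_format': '{log_level}: started at {log_time}'}
# li = self._factory(rec)
# li.level == 'WARNING'; li.timestamp is a datetime;
# li.message == 'info: started at 1514764800.0'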
Example #13
    def post_measurement_save(self, save_local=True):
        """
        check for runid conflicts. automatically update runid if conflict

        #. save to primary database (aka mainstore)
        #. save detector_ic to csv if applicable
        #. save to secondary database
        """
        if DEBUG:
            self.debug('Not saving measurement to database')
            return

        self.info('post measurement save')
        if not self.save_enabled:
            self.info('Database saving disabled')
            return

        # check for conflicts immediately before saving
        # automatically update if there is an issue
        run_spec = self.per_spec.run_spec
        conflict = self.datahub.is_conflict(run_spec)
        if conflict:
            self.debug('post measurement datastore conflict found. Automatically updating the aliquot and step')
            self.datahub.update_spec(run_spec)

        cp = self._current_data_frame

        ln = run_spec.labnumber
        aliquot = run_spec.aliquot

        if save_local:
            # save to local sqlite database for backup and reference
            self._local_db_save()

        # save to a database
        db = self.datahub.mainstore.db
        if not db or not db.connected:
            self.warning('No database instance. Not saving post measurement to primary database')
        else:
            with db.session_ctx() as sess:
                pt = time.time()

                lab = db.get_labnumber(ln)

                endtime = get_datetime().time()
                self.info('analysis finished at {}'.format(endtime))

                un = run_spec.username
                dbuser = db.get_user(un)
                if dbuser is None:
                    self.debug('user= {} does not exist. adding to database now'.format(un))
                    dbuser = db.add_user(un)

                self.debug('adding analysis identifier={}, aliquot={}, '
                           'step={}, increment={}'.format(ln, aliquot,
                                                          run_spec.step,
                                                          run_spec.increment))
                a = db.add_analysis(lab,
                                    user=dbuser,
                                    uuid=run_spec.uuid,
                                    endtime=endtime,
                                    aliquot=aliquot,
                                    step=run_spec.step,
                                    increment=run_spec.increment,
                                    comment=run_spec.comment,
                                    whiff_result=self.per_spec.whiff_result)
                sess.flush()
                run_spec.analysis_dbid = a.id
                run_spec.analysis_timestamp = a.analysis_timestamp

                experiment = db.get_experiment(self.dbexperiment_identifier, key='id')
                if experiment is not None:
                    # added analysis to experiment
                    a.experiment_id = experiment.id
                else:
                    self.warning('no experiment found for {}'.format(self.dbexperiment_identifier))

                # save measurement
                meas = self._save_measurement(db, a)
                # save extraction
                ext = self._db_extraction_id
                if ext is not None:
                    dbext = db.get_extraction(ext, key='id')
                    a.extraction_id = dbext.id
                    # save sensitivity info to extraction
                    self._save_sensitivity(dbext, meas)

                else:
                    self.debug('no extraction to associate with this run')

                self._save_spectrometer_info(db, meas)

                # add selected history
                db.add_selected_histories(a)
                # self._save_isotope_info(a, ss)
                self._save_isotope_data(db, a)

                # save ic factor
                self._save_detector_intercalibration(db, a)

                # save blanks
                self._save_blank_info(db, a)

                # save peak center
                self._save_peak_center(db, a, cp)

                # save monitor
                self._save_monitor_info(db, a)

                # save gains
                self._save_gains(db, a)

                if self.use_analysis_grouping:
                    self._save_analysis_group(db, a)

                # mem_log('post pychron save')

                pt = time.time() - pt
                self.debug('pychron save time= {:0.3f} '.format(pt))
                # file_log(pt)

        self.debug('$$$$$$$$$$$$$$$ auto_save_detector_ic={}'.format(self.per_spec.auto_save_detector_ic))
        if self.per_spec.auto_save_detector_ic:
            try:
                self._save_detector_ic_csv()
            except BaseException as e:
                self.debug('Failed auto saving detector ic. {}'.format(e))
Example #14
    def post_measurement_save(self, save_local=True):
        """
        check for runid conflicts. automatically update runid if conflict

        #. save to primary database (aka mainstore)
        #. save detector_ic to csv if applicable
        #. save to secondary database
        """
        # self.debug('AutomatedRunPersister post_measurement_save deprecated')
        # return

        if DEBUG:
            self.debug('Not saving measurement to database')
            return

        self.info('post measurement save')
        if not self.save_enabled:
            self.info('Database saving disabled')
            return

        # check for conflicts immediately before saving
        # automatically update if there is an issue
        run_spec = self.per_spec.run_spec

        # conflict = self.datahub.is_conflict(run_spec)
        # if conflict:
        #     self.debug('post measurement datastore conflict found. Automatically updating the aliquot and step')
        #     self.datahub.update_spec(run_spec)

        cp = self._current_data_frame

        ln = run_spec.labnumber
        aliquot = run_spec.aliquot

        if save_local:
            # save to local sqlite database for backup and reference
            self._local_db_save()

        # save to a database
        db = self.datahub.get_db('isotopedb')
        if not db or not db.connected:
            self.warning(
                'No database instance. Not saving post measurement to isotopedb database'
            )
        else:
            with db.session_ctx() as sess:
                pt = time.time()

                lab = db.get_labnumber(ln)

                endtime = get_datetime().time()
                self.info('analysis finished at {}'.format(endtime))

                un = run_spec.username
                dbuser = db.get_user(un)
                if dbuser is None:
                    self.debug(
                        'user= {} does not exist. adding to database now'.
                        format(un))
                    dbuser = db.add_user(un)

                self.debug('adding analysis identifier={}, aliquot={}, '
                           'step={}, increment={}'.format(
                               ln, aliquot, run_spec.step, run_spec.increment))
                a = db.add_analysis(lab,
                                    user=dbuser,
                                    uuid=run_spec.uuid,
                                    endtime=endtime,
                                    aliquot=aliquot,
                                    step=run_spec.step,
                                    increment=run_spec.increment,
                                    comment=run_spec.comment,
                                    whiff_result=self.per_spec.whiff_result)
                sess.flush()
                run_spec.analysis_dbid = a.id
                run_spec.analysis_timestamp = a.analysis_timestamp

                experiment = db.get_experiment(self.dbexperiment_identifier,
                                               key='id')
                if experiment is not None:
                    # added analysis to experiment
                    a.experiment_id = experiment.id
                else:
                    self.warning('no experiment found for {}'.format(
                        self.dbexperiment_identifier))

                # save measurement
                meas = self._save_measurement(db, a)
                # save extraction
                ext = self._db_extraction_id
                if ext is not None:
                    dbext = db.get_extraction(ext, key='id')
                    a.extraction_id = dbext.id
                    # save sensitivity info to extraction
                    self._save_sensitivity(dbext, meas)

                else:
                    self.debug('no extraction to associate with this run')

                self._save_spectrometer_info(db, meas)

                # add selected history
                db.add_selected_histories(a)
                # self._save_isotope_info(a, ss)
                self._save_isotope_data(db, a)

                # save ic factor
                self._save_detector_intercalibration(db, a)

                # save blanks
                self._save_blank_info(db, a)

                # save peak center
                self._save_peak_center(db, a, cp)

                # save monitor
                self._save_monitor_info(db, a)

                # save gains
                self._save_gains(db, a)

                if self.use_analysis_grouping:
                    self._save_analysis_group(db, a)

                # mem_log('post pychron save')

                pt = time.time() - pt
                self.debug('pychron save time= {:0.3f} '.format(pt))
                # file_log(pt)

        self.debug('$$$$$$$$$$$$$$$ auto_save_detector_ic={}'.format(
            self.per_spec.auto_save_detector_ic))
        if self.per_spec.auto_save_detector_ic:
            try:
                self._save_detector_ic_csv()
            except BaseException as e:
                self.debug('Failed auto saving detector ic. {}'.format(e))

        # don't save detector_ic runs to mass spec
        # measurement of an isotope on multiple detectors likely possible with mass spec but at this point
        # not worth trying.
        # if self.use_secondary_database:
        if self.use_massspec_database:
            from pychron.experiment.datahub import check_massspec_database_save

            if check_massspec_database_save(ln):
                if not self.datahub.store_connect('massspec'):
                    # if not self.massspec_importer or not self.massspec_importer.db.connected:
                    self.debug('Mass Spec database is not available')
                else:
                    self.debug('saving post measurement to secondary database')
                    # save to massspec
                    mt = time.time()
                    self._save_to_massspec(cp)
                    self.debug(
                        'mass spec save time= {:0.3f}'.format(time.time() -
                                                              mt))
Example #15
    def _easy_find_associated_analyses(self,
                                       found=None,
                                       use_cache=True,
                                       progress=None):
        if self.active_editor:
            unks = self.active_editor.analyses

            key = lambda x: x.labnumber
            unks = sorted(unks, key=key)

            db = self.manager.db
            with db.session_ctx():
                tans = []
                if found is None:
                    uuids = []
                else:
                    uuids = found

                ngroups = len(list(groupby(unks, key=key)))
                if progress is None:
                    progress = self.manager.open_progress(ngroups + 1)
                else:
                    progress.increase_max(ngroups + 1)

                for ln, ais in groupby(unks, key=key):
                    msg = 'find associated analyses for labnumber {}'.format(
                        ln)
                    self.debug(msg)
                    progress.change_message(msg)

                    ais = list(ais)
                    ts = [get_datetime(ai.timestamp) for ai in ais]
                    ref = ais[0]
                    ms = ref.mass_spectrometer
                    ed = ref.extract_device
                    self.debug("{} {}".format(ms, ed))
                    for atype in ('blank_unknown', 'blank_air',
                                  'blank_cocktail', 'air', 'cocktail'):
                        for i in range(10):
                            td = timedelta(hours=6 * (i + 1))
                            lpost, hpost = min(ts) - td, max(ts) + td

                            ans = db.get_analyses_date_range(
                                lpost,
                                hpost,
                                analysis_type=atype,
                                mass_spectrometers=ms)

                            if ans:
                                self.debug('{} {} to {}. nanalyses={}'.format(
                                    atype, lpost, hpost, len(ans)))
                                ans = [
                                    ai for ai in ans if ai.uuid not in uuids
                                ]
                                self.debug('new ans {}'.format(len(ans)))
                                if ans:
                                    tans.extend(ans)
                                    uuids.extend([ai.uuid for ai in ans])
                                break

                progress.soft_close()

                self.active_editor.set_items(tans,
                                             is_append=True,
                                             use_cache=use_cache,
                                             progress=progress)
                return uuids
Example #16
    def bulk_import_irradiation(self, irradname, creator, dry=True):

        src = self.processor.db
        tol_hrs = 6
        self.debug('bulk import irradiation {}'.format(irradname))
        oruns = []
        ts, idxs = self._get_irradiation_timestamps(irradname, tol_hrs=tol_hrs)
        print(ts)
        repository_identifier = 'Irradiation-{}'.format(irradname)

        # add project
        with self.dvc.db.session_ctx():
            self.dvc.db.add_project(repository_identifier, creator)

        def filterfunc(x):
            a = x.labnumber.irradiation_position is None
            b = False
            if not a:
                b = x.labnumber.irradiation_position.level.irradiation.name == irradname

            d = False
            if x.extraction:
                ed = x.extraction.extraction_device
                if not ed:
                    d = True
                else:
                    d = ed.name == 'Fusions CO2'

            return (a or b) and d

        # for ms in ('jan', 'obama'):

        # monitors not run on obama
        for ms in ('jan', ):
            for i, ais in enumerate(array_split(ts, idxs + 1)):
                if not ais.shape[0]:
                    self.debug('skipping {}'.format(i))
                    continue

                low = get_datetime(ais[0]) - timedelta(hours=tol_hrs / 2.)
                high = get_datetime(ais[-1]) + timedelta(hours=tol_hrs / 2.)
                with src.session_ctx():
                    ans = src.get_analyses_date_range(
                        low,
                        high,
                        mass_spectrometers=(ms, ),
                        samples=('FC-2', 'blank_unknown', 'blank_air',
                                 'blank_cocktail', 'air', 'cocktail'))

                    # runs = filter(lambda x: x.labnumber.irradiation_position is None or
                    #                         x.labnumber.irradiation_position.level.irradiation.name == irradname, ans)

                    runs = list(filter(filterfunc, ans))
                    if dry:
                        for ai in runs:
                            oruns.append(ai.record_id)
                            print(ms, ai.record_id)
                    else:
                        self.debug(
                            '================= Do Export i: {} low: {} high: {}'
                            .format(i, low, high))
                        self.debug('N runs: {}'.format(len(runs)))
                        self.do_export([ai.record_id for ai in runs],
                                       repository_identifier,
                                       creator,
                                       monitor_mapping=('FC-2', 'Sanidine',
                                                        repository_identifier))

        return oruns
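
filterfunc restated as a standalone predicate, since the single-letter flags are terse. Same logic as the closure above, rewritten purely for readability: keep an analysis when it has no irradiation position or its position belongs to irradname, and its extraction record exists with either no device or the 'Fusions CO2' device:

def keep(x, irradname):
    pos = x.labnumber.irradiation_position
    no_position = pos is None
    in_irradiation = not no_position and pos.level.irradiation.name == irradname

    device_ok = False
    if x.extraction:
        ed = x.extraction.extraction_device
        device_ok = (not ed) or ed.name == 'Fusions CO2'

    return (no_position or in_irradiation) and device_ok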
Example #17
    def post_measurement_save(self):
        if DEBUG:
            self.debug('Not saving measurement to database')
            return

        self.info('post measurement save')
        #         mem_log('pre post measurement save')
        if not self.save_enabled:
            self.info('Database saving disabled')
            return

        # check for conflicts immediately before saving
        # automatically update if there is an issue
        conflict = self.datahub.is_conflict(self.run_spec)
        if conflict:
            self.debug('post measurement datastore conflict found. Automatically updating the aliquot and step')
            self.datahub.update_spec(self.run_spec)

        cp = self._current_data_frame

        ln = self.run_spec.labnumber
        aliquot = self.run_spec.aliquot

        # save to local sqlite database for backup and reference
        # self._local_db_save()

        # save to a database
        db = self.datahub.mainstore.db
        #         if db and db.connect(force=True):
        if not db or not db.connected:
            self.warning('No database instance. Not saving post measurement to primary database')
        else:
            with db.session_ctx() as sess:
                pt = time.time()

                lab = db.get_labnumber(ln)

                endtime = get_datetime().time()
                self.info('analysis finished at {}'.format(endtime))

                un = self.run_spec.username
                dbuser = db.get_user(un)
                if dbuser is None:
                    self.debug('user= {} does not exist. adding to database now'.format(un))
                    dbuser = db.add_user(un)

                self.debug('adding analysis identifier={}, aliquot={}, '
                           'step={}, increment={}'.format(ln, aliquot,
                                                          self.run_spec.step,
                                                          self.run_spec.increment))
                a = db.add_analysis(lab,
                                    user=dbuser,
                                    uuid=self.uuid,
                                    endtime=endtime,
                                    aliquot=aliquot,
                                    step=self.run_spec.step,
                                    increment=self.run_spec.increment,
                                    comment=self.run_spec.comment,
                                    whiff_result=self.whiff_result)
                sess.flush()
                self.run_spec.analysis_dbid = a.id
                self.run_spec.analysis_timestamp = a.analysis_timestamp

                experiment = db.get_experiment(self.experiment_identifier, key='id')
                if experiment is not None:
                    # added analysis to experiment
                    a.experiment_id = experiment.id
                else:
                    self.warning('no experiment found for {}'.format(self.experiment_identifier))

                # save measurement
                meas = self._save_measurement(db, a)
                # save extraction
                ext = self._db_extraction_id
                if ext is not None:
                    dbext = db.get_extraction(ext, key='id')
                    a.extraction_id = dbext.id
                    # save sensitivity info to extraction
                    self._save_sensitivity(dbext, meas)

                else:
                    self.debug('no extraction to associate with this run')

                self._save_spectrometer_info(db, meas)

                # add selected history
                db.add_selected_histories(a)
                # self._save_isotope_info(a, ss)
                self._save_isotope_data(db, a)

                # save ic factor
                self._save_detector_intercalibration(db, a)

                # save blanks
                self._save_blank_info(db, a)

                # save peak center
                self._save_peak_center(db, a, cp)

                # save monitor
                self._save_monitor_info(db, a)

                # save gains
                self._save_gains(db, a)

                if self.use_analysis_grouping:
                    self._save_analysis_group(db, a)

                mem_log('post pychron save')

                pt = time.time() - pt
                self.debug('pychron save time= {:0.3f} '.format(pt))
                file_log(pt)

        # don't save detector_ic runs to mass spec
        # measurement of an isotope on multiple detectors likely possible with mass spec but at this point
        # not worth trying.
        # i.e. skip when run_spec.analysis_type is 'detector_ic'
        if self.use_secondary_database and check_secondary_database_save(ln):
            if not self.datahub.secondary_connect():
                # if not self.massspec_importer or not self.massspec_importer.db.connected:
                self.debug('Secondary database is not available')
            else:
                self.debug('saving post measurement to secondary database')
                # save to massspec
                mt = time.time()
                self._save_to_massspec(cp)
                self.debug('mass spec save time= {:0.3f}'.format(time.time() - mt))
                mem_log('post mass spec save')
Example #18
 def pre_extraction_save(self):
     d = get_datetime()
     self.runtime = d.time()
     self.rundate = d.date()
     self.info('Analysis started at {}'.format(self.runtime))