Ejemplo n.º 1
0
    def _loop(self, n, d):
        """Run one build/iterate cycle.

        Schedules the graph build on the main thread, gives it a moment to
        start, then runs the iteration and waits (with ~10% headroom over
        *n* seconds) for the iteration event to be set.
        """
        mem_log('<loop start')

        # Graph construction must run on the UI/main thread.
        invoke_in_main_thread(self._build_graph, n, d)

        self._iter_event = Event()
        # Short grace period so the main-thread build can get underway.
        self._iter_event.wait(0.1)

        self._iter(n, d)
        # Wait up to n*1.1 seconds for the iteration to signal completion.
        self._iter_event.wait(n * 1.1)

        mem_log('> loop finished')
Ejemplo n.º 2
0
    def _loop(self, n, d):
        """Run one build/iterate cycle.

        Schedules the graph build on the main thread, gives it a moment to
        start, then runs the iteration and waits (with ~10% headroom over
        *n* seconds) for the iteration event to be set.
        """
        mem_log('<loop start')

        # Graph construction must run on the UI/main thread.
        invoke_in_main_thread(self._build_graph, n, d)

        self._iter_event = Event()
        # Short grace period so the main-thread build can get underway.
        self._iter_event.wait(0.1)

        self._iter(n, d)
        # Wait up to n*1.1 seconds for the iteration to signal completion.
        self._iter_event.wait(n * 1.1)

        mem_log('> loop finished')
Ejemplo n.º 3
0
    def _join_run(self, spec, run):
        """Execute *run* to completion, then report its state and tear it
        down.

        If the executor is still alive after the run and *spec* is a blank
        analysis, cache the run's baseline-corrected signals as the
        previous blanks for subsequent runs.
        """
        self._do_run(run)
        self.debug('{} finished'.format(run.runid))

        if self.isAlive():
            self.debug('spec analysis type {}'.format(spec.analysis_type))
            # Blank runs supply the blank correction for the runs that
            # follow them.
            if spec.analysis_type.startswith('blank'):
                signals = run.get_baseline_corrected_signals()
                if signals is not None:
                    self._prev_blanks = signals
                    self.debug('previous blanks ={}'.format(signals))

        # Report and tear down unconditionally, even if the executor died.
        self._report_execution_state(run)
        run.teardown()
        mem_log('> end join')
Ejemplo n.º 4
0
    def _join_run(self, spec, run):
        """Execute *run* to completion, then report its state and tear it
        down.

        If the executor is still alive after the run and *spec* is a blank
        analysis, cache the run's baseline-corrected signals as the
        previous blanks for subsequent runs.
        """
        self._do_run(run)
        self.debug('{} finished'.format(run.runid))

        if self.isAlive():
            self.debug('spec analysis type {}'.format(spec.analysis_type))
            # Blank runs supply the blank correction for the runs that
            # follow them.
            if spec.analysis_type.startswith('blank'):
                signals = run.get_baseline_corrected_signals()
                if signals is not None:
                    self._prev_blanks = signals
                    self.debug('previous blanks ={}'.format(signals))

        # Report and tear down unconditionally, even if the executor died.
        self._report_execution_state(run)
        run.teardown()
        mem_log('> end join')
Ejemplo n.º 5
0
    def _do_run(self, run):
        """Execute the four phases of an automated run and finalize it.

        Phases (_start, _extraction, _measurement, _post_measurement) run
        in order; the loop stops early if the executor dies or a phase
        returns falsy. Only when all phases complete (the for-else) is the
        run promoted to 'success' — unless a phase already set a terminal
        state ('truncated', 'canceled', 'failed').
        """
        mem_log('< start')

        run.state = 'not run'

        q = self.experiment_queue
        # is this the last run in the queue. queue is not empty until _start
        # runs so n==1 means last run
        run.is_last = len(q.cleaned_automated_runs) == 1

        self.current_run = run
        st = time.time()
        for step in ('_start',
                     '_extraction',
                     '_measurement',
                     '_post_measurement'):

            if not self.isAlive():
                break

            f = getattr(self, step)
            if not f(run):
                break
        else:
            # All phases completed without break.
            self.debug('$$$$$$$$$$$$$$$$$$$$ state at run end {}'.format(run.state))
            # PEP 8 idiom: 'not in' instead of 'not x in y'.
            if run.state not in ('truncated', 'canceled', 'failed'):
                run.state = 'success'

        if self.stats:
            self.stats.nruns_finished += 1

        if run.state in ('success', 'truncated'):
            self.run_completed = run

        self._remove_backup(run.uuid)
        # check to see if action should be taken
        self._check_run_at_end(run)

        t = time.time() - st
        self.info('Automated run {} {} duration: {:0.3f} s'.format(run.runid, run.state, t))

        run.finish()

        self.wait_group.pop()

        mem_log('end run')
Ejemplo n.º 6
0
    def _do_run(self, run):
        """Execute the four phases of an automated run and finalize it.

        Phases (_start, _extraction, _measurement, _post_measurement) run
        in order; the loop stops early if the executor dies or a phase
        returns falsy. Only when all phases complete (the for-else) is the
        run promoted to 'success' — unless a phase already set a terminal
        state ('truncated', 'canceled', 'failed').
        """
        mem_log('< start')

        run.state = 'not run'

        q = self.experiment_queue
        # is this the last run in the queue. queue is not empty until _start
        # runs so n==1 means last run
        run.is_last = len(q.cleaned_automated_runs) == 1

        self.current_run = run
        st = time.time()
        for step in ('_start', '_extraction', '_measurement',
                     '_post_measurement'):

            if not self.isAlive():
                break

            f = getattr(self, step)
            if not f(run):
                break
        else:
            # All phases completed without break.
            self.debug('$$$$$$$$$$$$$$$$$$$$ state at run end {}'.format(
                run.state))
            # PEP 8 idiom: 'not in' instead of 'not x in y'.
            if run.state not in ('truncated', 'canceled', 'failed'):
                run.state = 'success'

        if self.stats:
            self.stats.nruns_finished += 1

        if run.state in ('success', 'truncated'):
            self.run_completed = run

        self._remove_backup(run.uuid)
        # check to see if action should be taken
        self._check_run_at_end(run)

        t = time.time() - st
        self.info('Automated run {} {} duration: {:0.3f} s'.format(
            run.runid, run.state, t))

        run.finish()

        self.wait_group.pop()

        mem_log('end run')
Ejemplo n.º 7
0
    def post_measurement_save(self):
        """Persist the completed run to the primary database and, when
        enabled, to the secondary (mass spec) database.

        Skips entirely when DEBUG is set or ``save_enabled`` is False.
        Resolves aliquot/step conflicts first, then — inside a single
        session — adds the analysis record and attaches measurement,
        extraction, spectrometer, isotope, detector-intercalibration,
        blank, peak-center, monitor and gain information to it.
        """
        if DEBUG:
            self.debug('Not measurement saving to database')
            return

        self.info('post measurement save')
        #         mem_log('pre post measurement save')
        if not self.save_enabled:
            self.info('Database saving disabled')
            return

        #check for conflicts immediately before saving
        #automatically update if there is an issue
        conflict = self.datahub.is_conflict(self.run_spec)
        if conflict:
            self.debug('post measurement datastore conflict found. Automatically updating the aliquot and step')
            self.datahub.update_spec(self.run_spec)

        # current data frame path; reused below for peak-center and mass
        # spec saves
        cp = self._current_data_frame

        ln = self.run_spec.labnumber
        aliquot = self.run_spec.aliquot

        # save to local sqlite database for backup and reference
        # self._local_db_save()

        # save to a database
        db = self.datahub.mainstore.db
        #         if db and db.connect(force=True):
        if not db or not db.connected:
            self.warning('No database instanc. Not saving post measurement to primary database')
        else:
            # single session/transaction for all primary-database writes
            with db.session_ctx() as sess:
                pt = time.time()

                lab = db.get_labnumber(ln)

                endtime = get_datetime().time()
                self.info('analysis finished at {}'.format(endtime))

                # ensure the run's user exists in the database
                un = self.run_spec.username
                dbuser = db.get_user(un)
                if dbuser is None:
                    self.debug('user= {} does not existing. adding to database now'.format(un))
                    dbuser = db.add_user(un)

                self.debug('adding analysis identifier={}, aliquot={}, '
                           'step={}, increment={}'.format(ln, aliquot,
                                                       self.run_spec.step,
                                                       self.run_spec.increment))
                a = db.add_analysis(lab,
                                    user=dbuser,
                                    uuid=self.uuid,
                                    endtime=endtime,
                                    aliquot=aliquot,
                                    step=self.run_spec.step,
                                    increment=self.run_spec.increment,
                                    comment=self.run_spec.comment,
                                    whiff_result=self.whiff_result)
                # flush so the new analysis row gets its id/timestamp
                sess.flush()
                self.run_spec.analysis_dbid = a.id
                self.run_spec.analysis_timestamp = a.analysis_timestamp

                experiment = db.get_experiment(self.experiment_identifier, key='id')
                if experiment is not None:
                    # added analysis to experiment
                    a.experiment_id = experiment.id
                else:
                    self.warning('no experiment found for {}'.format(self.experiment_identifier))

                # save measurement
                meas = self._save_measurement(db, a)
                # save extraction
                ext = self._db_extraction_id
                if ext is not None:
                    dbext = db.get_extraction(ext, key='id')
                    a.extraction_id = dbext.id
                    # save sensitivity info to extraction
                    self._save_sensitivity(dbext, meas)

                else:
                    self.debug('no extraction to associate with this run')

                self._save_spectrometer_info(db, meas)

                # add selected history
                db.add_selected_histories(a)
                # self._save_isotope_info(a, ss)
                self._save_isotope_data(db, a)

                # save ic factor
                self._save_detector_intercalibration(db, a)

                # save blanks
                self._save_blank_info(db, a)

                # save peak center
                self._save_peak_center(db, a, cp)

                # save monitor
                self._save_monitor_info(db, a)

                # save gains
                self._save_gains(db, a)

                if self.use_analysis_grouping:
                    self._save_analysis_group(db, a)

                mem_log('post pychron save')

                pt = time.time() - pt
                self.debug('pychron save time= {:0.3f} '.format(pt))
                file_log(pt)

        # don't save detector_ic runs to mass spec
        # measurement of an isotope on multiple detectors likely possible with mass spec but at this point
        # not worth trying.
        if self.use_secondary_database and check_secondary_database_save(ln):#not self.run_spec.analysis_type in ('detector_ic',):
            if not self.datahub.secondary_connect():
                # if not self.massspec_importer or not self.massspec_importer.db.connected:
                self.debug('Secondary database is not available')
            else:
                self.debug('saving post measurement to secondary database')
                # save to massspec
                mt = time.time()
                self._save_to_massspec(cp)
                self.debug('mass spec save time= {:0.3f}'.format(time.time() - mt))
                mem_log('post mass spec save')