Example #1
0
def separate_curve_fit(entry, run, **kwargs):
    """
        Fit a single run with scipy's curve_fit.

        entry:      dict keyed by parameter name; each value maps
                    'p0', 'blo', 'bhi' to entry widgets providing get()
        run:        string of the form "year.run"
        kwargs:     additional keyword arguments passed to curve_fit

        returns (par, std, std, chi): best-fit parameters, errors
        (duplicated for lower/upper), and reduced chi-squared
    """

    # data
    year, run = map(int, run.split('.'))
    dat = bd.bdata(run, year)

    # fit function
    pexp = pulsed_exp(lifetime=bd.life.Li8, pulse_len=dat.pulse_s)

    # initial values and bounds, in entry order
    p0 = [float(entry[k]['p0'].get()) for k in entry.keys()]
    blo = [float(entry[k]['blo'].get()) for k in entry.keys()]
    bhi = [float(entry[k]['bhi'].get()) for k in entry.keys()]

    # fit
    t, a, da = dat.asym('c')
    par, cov = curve_fit(pexp,
                         t,
                         a,
                         sigma=da,
                         absolute_sigma=True,
                         p0=p0,
                         bounds=[blo, bhi],
                         **kwargs)
    std = np.diag(cov)**0.5

    # reduced chi2: use the actual number of fitted parameters for the
    # degrees of freedom rather than a hard-coded 2
    dof = len(a) - len(par)
    chi = np.sum(((a - pexp(t, *par)) / da)**2) / dof

    return (par, std, std, chi)
Example #2
0
    def write(self, filename, **notes):
        """
            Save fit settings and results to a yaml file.

            filename:       path of the output file
            notes:          extra key-value pairs to include in the output
        """

        # fetch run metadata for the title
        dat = bd.bdata(self.run, self.year)

        # collect attributes to save, title first, user notes last
        output = {'title': dat.title}
        for key in ('run', 'year', 'rebin', 'maxiter', 'lamb', 'probe'):
            output[key] = self.__dict__[key]
        output.update(notes)

        # convert numpy arrays to plain lists for yaml serialization
        output['p'] = self.results.apply(np.ndarray.tolist).tolist()
        output['alpha'] = self.results.index.tolist()
        output['lamb'] = output['lamb'].tolist()

        # write to file
        print("writing...", end=' ', flush=True)
        with open(filename, 'w') as fid:
            fid.write(yaml.safe_dump(output))
        print("done", flush=True)
Example #3
0
def test_load_run_from_file(tab=None, b=None):
    """Check that loading a run from file disables manual input, fetches the
    right data, and that clearing the file restores normal fetching."""

    # reference data
    dat = bd.bdata(40123, 2020)

    # determine the file location on disk
    if 'BNMR_ARCHIVE' in os.environ:
        path = os.path.join(os.environ['BNMR_ARCHIVE'], '2020', '040123.msr')
    else:
        path = os.path.join(os.environ['HOME'], '.bdata', 'bnmr', '2020',
                            '040123.msr')

    # load from file
    tab.load_file(filename=path)

    # manual inputs must be disabled while a file is loaded
    assert tab.entry_year['state'] == 'disabled', 'year state == disabled'
    assert tab.entry_runn['state'] == 'disabled', 'runn state == disabled'

    # the file's data should match the reference run
    assert tab.data.title == dat.title, 'tab data loaded correctly'

    # fetching again should still return the file's data
    tab.get_data()
    assert tab.data.title == dat.title, 'tab data loaded correctly'

    # clear the loaded file
    tab.load_file(filename='')

    # manual inputs should be re-enabled
    assert tab.entry_year['state'] == 'normal', 'year state == normal'
    assert tab.entry_runn['state'] == 'normal', 'runn state == normal'

    # fetching by run number works again
    tab.runn.set(40127)
    tab.year.set(2020)
    tab.get_data()
    expected = bd.bdata(40127, 2020).title
    assert tab.data.title == expected, 'tab data loaded correctly'
Example #4
0
    def find(self, *args):
        """
            Find deadtime of entered run.

            Fetches the run, fits the deadtime correction, stores the
            resulting dt (ns) and flattening constant c, updates the display
            strings, and activates deadtime usage in bfit.

            Raises RuntimeError on bad run input and
            bd.exceptions.MinimizationError on fit failure, after showing an
            error dialog.
        """

        # get data
        try:
            data = bd.bdata(self.run.get(), self.year.get())
        except RuntimeError as err:
            messagebox.showerror('Bad run input', str(err))
            # bare raise preserves the original traceback without adding a
            # spurious "during handling" context chain
            raise

        # get fix state
        fixed = []
        if self.fix_dt.get():
            fixed.append('dt')
        if self.fix_c.get():
            fixed.append('c')

        # initial values (dt entered in ns; fit presumably takes seconds,
        # hence the 1e-9 — TODO confirm against get_deadtime)
        p0 = {'dt': self.dt * 1e-9, 'c': self.c}

        # find the correction
        try:
            m = data.get_deadtime(**p0, fixed=fixed, return_minuit=True)
        except RuntimeError as err:
            messagebox.showerror('Bad run input', str(err))
            raise
        except bd.exceptions.MinimizationError as err:
            messagebox.showerror('Minimization failed', str(err))
            raise

        self.dt = m.values['dt_ns']
        self.c = m.values['c']
        chi2 = m.fval

        # set the display strings
        self.dt_inpt.set('%f' % self.dt)
        self.c_inpt.set('%f' % self.c)
        self.chi.set('Flattening χ2 = %.3f' % chi2)

        # set the value
        self.toggle_scope()

        # activate deadtime usage
        self.bfit.deadtime_switch.set(True)
Example #5
0
 def read(self):
     """Read the data file and extract temperature, field, and bias.

     Sets self.bd and the derived attributes; missing sensors are logged
     and replaced with fallback values (NaN, or -1111 for temperature).
     """

     # bdata access
     self.bd = bdata(self.run, self.year)

     # temperature: prefer the sample thermometer, fall back to the oven
     # thermometer, then to the -1111 sentinel
     try:
         self.temperature = self.bd.camp.smpl_read_A
     except AttributeError:
         self.logger.exception('Thermometer smpl_read_A not found')
         try:
             self.temperature = self.bd.camp.oven_readC
         except AttributeError:
             self.logger.exception('Thermometer oven_readC not found')
             self.temperature = -1111

     # field: BNMR reads the magnet directly; otherwise convert the
     # Helmholtz coil current (1e-4 factor presumably Gauss -> Tesla)
     try:
         if self.bd.area == 'BNMR':
             self.field = self.bd.camp.b_field.mean
             self.field_std = self.bd.camp.b_field.std
         else:
             self.field = current2field(self.bd.epics.hh_current.mean)*1e-4
             self.field_std = current2field(self.bd.epics.hh_current.std)*1e-4
     except AttributeError:
         self.logger.exception('Field not found')
         self.field = np.nan
         self.field_std = np.nan

     # bias: NQR value scaled by 1/1000 (presumably V -> kV)
     try:
         if self.bd.area == 'BNMR':
             self.bias = self.bd.epics.nmr_bias.mean
             self.bias_std = self.bd.epics.nmr_bias.std
         else:
             self.bias = self.bd.epics.nqr_bias.mean/1000.
             self.bias_std = self.bd.epics.nqr_bias.std/1000.
     except AttributeError:
         self.logger.exception('Bias not found')
         self.bias = np.nan
         # BUGFIX: also reset bias_std so it cannot hold a stale value
         # (the field branch above resets both; this one did not)
         self.bias_std = np.nan
Example #6
0
    def _setup(self):
        """Fetch the run data, drop zero-error points, and build the fit
        function, error matrix, and kernel."""

        # fetch asymmetry data
        dat = bd.bdata(self.run, self.year)
        self.x, self.y, self.yerr = dat.asym('c', rebin=self.rebin)

        # keep only points with nonzero uncertainty
        keep = self.yerr != 0
        self.x = self.x[keep]
        self.y = self.y[keep]
        self.yerr = self.yerr[keep]

        # fit function: pulsed exponential with unit amplitude
        pexp = pulsed_exp(lifetime=bd.life[self.probe],
                          pulse_len=dat.get_pulse_s())
        self.fn = lambda x, w: pexp(x, w, 1)

        # error matrix: diagonal of inverse uncertainties
        self.S = np.diag(1 / self.yerr)

        # kernel: one column per lambda value
        self.K = np.array([self.fn(self.x, lam) for lam in self.lamb]).T
Example #7
0
def separate_migrad(entry, run, do_minos=False, **kwargs):
    """
        Fit a single run with minuit's migrad.

        entry:      dict keyed by parameter name; each value maps
                    'p0', 'blo', 'bhi' to entry widgets providing get()
        run:        string of the form "year.run"
        do_minos:   if True, compute asymmetric minos errors
        kwargs:     additional keyword arguments for the minuit constructor

        returns (par, lower, upper, chi)
    """

    # data
    year, run = map(int, run.split('.'))
    dat = bd.bdata(run, year)

    # fit function
    pexp = pulsed_exp(lifetime=bd.life.Li8, pulse_len=dat.pulse_s)

    # initial values, renaming 1_T1 -> lambda_s for minuit
    p0 = {}
    for key in entry.keys():
        p0[key.replace('1_T1', 'lambda_s')] = float(entry[key]['p0'].get())

    # bounds in entry order
    bounds = []
    for key in entry.keys():
        bounds.append((float(entry[key]['blo'].get()),
                       float(entry[key]['bhi'].get())))

    # run the fit
    t, a, da = dat.asym('c')
    m = minuit(pexp, t, a, da, **p0, limit=bounds, **kwargs)
    m.migrad()

    par = m.values

    if do_minos:
        # asymmetric errors from minos
        m.minos()
        n = len(par)
        lower = np.abs(np.array([m.merrors[i].lower for i in range(n)]))
        upper = np.array([m.merrors[i].upper for i in range(n)])
    else:
        # symmetric errors
        lower = m.errors
        upper = lower

    chi = m.chi2

    return (par, lower, upper, chi)
Example #8
0
    def get_data(self, quiet=False):
        """Display data and send bdata object to bfit draw list. 
        Return True on success, false on Failure
        """

        # settings
        mode_dict = {
            "1f": "Frequency Scan",
            "1w": "Frequency Comb",
            "1n": "Rb Cell Scan",
            "1e": "Field Scan",
            "20": "SLR",
            '2h': 'SLR with Alpha Tracking',
            '2s': 'Spin Echo',
            '2e': 'Randomized Frequency Scan'
        }

        # fetch year
        try:
            year = self.year.get()
        except ValueError:
            for t in [self.text_nw, self.text_ne, self.text_sw, self.text_se]:
                self.set_textbox_text(t, 'Year input must be integer valued')
                self.logger.exception('Year input must be integer valued')
            return False

        # fetch run number
        run = self.runn.get()

        self.logger.debug('Parsing run input %s', run)

        if run < 40000:

            runlist = []
            # look for latest run by run number
            for d in [
                    self.bfit.bnmr_archive_label, self.bfit.bnqr_archive_label
            ]:
                dirloc = os.environ[d]
                runlist.extend(
                    glob.glob(os.path.join(dirloc, str(year),
                                           '0%d*.msr' % run)))
            runlist = [
                int(os.path.splitext(os.path.basename(r))[0]) for r in runlist
            ]

            # get latest run by max run number
            try:
                run = max(runlist)
            except ValueError:
                self.logger.exception('Run fetch failed')
                for t in [
                        self.text_nw, self.text_ne, self.text_sw, self.text_se
                ]:
                    self.set_textbox_text(t, 'Run not found.')
                return False

        self.logger.info('Fetching run %s from %s', run, year)

        # get data
        try:
            data = fitdata(self.bfit, bdata(run, year=year))
        except ValueError:
            self.logger.exception('File read failed.')
            for t in [self.text_nw, self.text_sw, self.text_se, self.text_ne]:
                self.set_textbox_text(t, 'File read failed.')
            return False
        except RuntimeError:
            self.logger.exception('File does not exist.')
            for t in [self.text_nw, self.text_sw, self.text_se, self.text_ne]:
                self.set_textbox_text(t, 'File does not exist.')
            return False

        # set data field
        self.data = data

        # set draw parameters
        self.bfit.set_asym_calc_mode_box(data.mode, self)

        # quiet mode: don't update text
        if quiet: return True

        # NE -----------------------------------------------------------------

        # get data: headers
        mode = mode_dict[data.mode]
        try:
            if data.ppg.rf_enable.mean and data.mode == '20' and \
                                                        data.ppg.rf_on.mean > 0:
                mode = "Hole Burning"
        except AttributeError:
            pass

        mins, sec = divmod(data.duration, 60)
        duration = "%dm %ds" % (mins, sec)

        # set dictionary
        data_nw = {
            "Run": '%d (%d)' % (data.run, data.year),
            "Area": data.area,
            "Run Mode": "%s (%s)" % (mode, data.mode),
            "Title": data.title,
            "Experimenters": data.experimenter,
            "Sample": data.sample,
            "Orientation": data.orientation,
            "Experiment": str(data.exp),
            "Run Duration": duration,
            "Start": data.start_date,
            "End": data.end_date,
            "": "",
        }

        # set key order
        key_order_nw = [
            'Run',
            'Run Mode',
            'Title',
            '',
            'Start',
            'End',
            'Run Duration',
            '',
            'Sample',
            'Orientation',
            '',
            'Experiment',
            'Area',
            'Experimenters',
        ]

        # SW -----------------------------------------------------------------
        data_sw = {'': ''}
        key_order_sw = []

        # get data: temperature and fields
        try:
            temp = data.camp.smpl_read_A.mean
            temp_stdv = data.camp.smpl_read_A.std
            data_sw["Temperature"] = "%.2f +/- %.2f K" % (temp, temp_stdv)
            key_order_sw.append('Temperature')
        except AttributeError:
            pass

        try:
            curr = data.camp.smpl_current
            data_sw["Heater Current"] = "%.2f +/- %.2f A" % (curr.mean,
                                                             curr.std)
            key_order_sw.append('Heater Current')
        except AttributeError:
            pass

        try:
            temp = data.camp.oven_readC.mean
            temp_stdv = data.camp.oven_readC.std
            data_sw['Oven Temperature'] = "%.2f +/- %.2f K" % (temp, temp_stdv)
            key_order_sw.append('Oven Temperature')
        except AttributeError:
            pass

        try:
            curr = data.camp.oven_current
            data_sw['Oven Current'] = "%.2f +/- %.2f A" % (curr.mean, curr.std)
            key_order_sw.append('Oven Current')
        except AttributeError:
            pass

        try:
            field = np.around(data.camp.b_field.mean, 3)
            field_stdv = np.around(data.camp.b_field.std, 3)
            data_sw['Magnetic Field'] = "%.3f +/- %.3f T" % (field, field_stdv)
            key_order_sw.append('Magnetic Field')
        except AttributeError:
            pass

        try:
            val = current2field(data.epics.hh_current.mean)
            data_sw['Magnetic Field'] = "%.3f Gauss" % val
            key_order_sw.append('Magnetic Field')
        except AttributeError:
            pass

        key_order_sw.append('')

        # cryo options
        try:
            mass = data.camp.mass_read
            data_sw['Mass Flow'] = "%.3f +/- %.3f" % (mass.mean, mass.std)
            key_order_sw.append('Mass Flow')
        except AttributeError:
            pass

        try:
            cryo = data.camp.cryo_read
            data_sw['CryoEx Mass Flow'] = "%.3f +/- %.3f" % (cryo.mean,
                                                             cryo.std)
            key_order_sw.append('CryoEx Mass Flow')
        except AttributeError:
            pass

        try:
            data_sw[
                'Needle Setpoint'] = "%.3f turns" % data.camp.needle_set.mean
            key_order_sw.append('Needle Setpoint')
        except AttributeError:
            pass

        try:
            data_sw[
                'Needle Readback'] = "%.3f turns" % data.camp.needle_pos.mean
            key_order_sw.append('Needle Readback')
        except AttributeError:
            pass

        try:
            lift_set = np.around(data.camp.clift_set.mean, 3)
            data_sw['Cryo Lift Setpoint'] = "%.3f mm" % lift_set
            key_order_sw.append('Cryo Lift Setpoint')
        except AttributeError:
            pass

        try:
            lift_read = np.around(data.camp.clift_read.mean, 3)
            data_sw['Cryo Lift Readback'] = "%.3f mm" % lift_read
            key_order_sw.append('Cryo Lift Readback')
        except AttributeError:
            pass

        key_order_sw.append('')

        # rates and counts
        hist = ('F+','F-','B-','B+') if data.area == 'BNMR' \
                                     else ('L+','L-','R-','R+')
        try:
            val = int(np.sum([data.hist[h].data for h in hist]))
            data_sw['Total Counts Sample'] = f'{val:,}'.replace(',', ' ')
            key_order_sw.append('Total Counts Sample')
        except AttributeError:
            pass

        try:
            val = int(
                np.sum([data.hist[h].data for h in hist]) / data.duration)
            data_sw['Rate Sample'] = f'{val:,} (1/s)'.replace(',', ' ')
            key_order_sw.append('Rate Sample')
        except AttributeError:
            pass

        hist = ('F+', 'F-', 'B-', 'B+')
        try:
            val = int(np.sum([data.hist['NBM' + h].data for h in hist]))
            data_sw['Total Counts NBM'] = f'{val:,}'.replace(',', ' ')
            key_order_sw.append('Total Counts NBM')
        except AttributeError:
            pass

        try:
            val = int(
                np.sum([data.hist['NBM' + h].data
                        for h in hist]) / data.duration)
            data_sw['Rate NBM'] = f'{val:,} (1/s)'.replace(',', ' ')
            key_order_sw.append('Rate NBM')
        except AttributeError:
            pass

        # rf dac
        if mode != 'SLR':
            key_order_sw.append('')
            try:
                data_sw['rf_dac'] = "%d" % int(data.camp.rf_dac.mean)
                key_order_sw.append('rf_dac')
            except AttributeError:
                pass

            try:
                data_sw[
                    'RF Amplifier Gain'] = "%.2f" % data.camp.rfamp_rfgain.mean
                key_order_sw.append('RF Amplifier Gain')
            except AttributeError:
                pass

        # SE -----------------------------------------------------------------
        data_se = {'': ''}
        key_order_se = []

        # get data: biases
        try:
            if 'nqr_bias' in data.epics.keys():
                bias = data.epics.nqr_bias.mean / 1000.
                bias_std = data.epics.nqr_bias.std / 1000.
            elif 'nmr_bias' in data.epics.keys():
                bias = data.epics.nmr_bias.mean
                bias_std = data.epics.nmr_bias.std

            data_se["Platform Bias"] = "%.3f +/- %.3f kV" % \
                    (np.around(bias,3),np.around(bias_std,3))
            key_order_se.append("Platform Bias")

        except UnboundLocalError:
            pass

        try:
            data_se["BIAS15"] = "%.3f +/- %.3f V" % \
                    (np.around(data.epics.bias15.mean,3),
                     np.around(data.epics.bias15.std,3))
            key_order_se.append('BIAS15')
        except AttributeError:
            pass

        # get data: beam energy
        try:
            init_bias = data.epics.target_bias.mean
            init_bias_std = data.epics.target_bias.std
        except AttributeError:
            try:
                init_bias = data.epics.target_bias.mean
                init_bias_std = data.epics.target_bias.std
            except AttributeError:
                pass

        try:
            val = np.around(init_bias / 1000., 3)
            std = np.around(init_bias_std / 1000., 3)
            data_se["Initial Beam Energy"] = "%.3f +/- %.3f keV" % (val, std)
            key_order_se.append('Initial Beam Energy')
        except UnboundLocalError:
            pass

        # Get final beam energy
        try:
            val = np.around(data.beam_kev(), 3)
            std = np.around(data.beam_kev(get_error=True), 3)
            data_se['Implantation Energy'] = "%.3f +/- %.3f keV" % (val, std)
            key_order_se.append('Implantation Energy')
        except AttributeError:
            pass

        key_order_se.append('')

        # laser stuff
        try:
            val = data.epics.las_pwr
            data_se['Laser Power'] = "%.3f +/- %.3f A" % (val.mean, val.std)
            key_order_se.append('Laser Power')
        except AttributeError:
            pass

        # magnet stuff
        try:
            val = data.epics.hh_current.mean
            std = data.epics.hh_current.std
            data_se['Magnet Current'] = "%.3f +/- %.3f A" % (val, std)
            key_order_se.append('Magnet Current')
        except AttributeError:
            pass

        # NE -----------------------------------------------------------------
        data_ne = {'': ''}
        key_order_ne = []

        # get data: SLR data
        if data.mode in ['20', '2h']:
            try:
                dwell = int(data.ppg.dwelltime.mean)
                data_ne['Dwell Time'] = "%d ms" % dwell
                key_order_ne.append('Dwell Time')
            except AttributeError:
                pass

            try:
                beam = int(data.ppg.prebeam.mean)
                data_ne[
                    'Number of Prebeam Dwelltimes'] = "%d dwelltimes" % beam
                key_order_ne.append('Number of Prebeam Dwelltimes')
            except AttributeError:
                pass

            try:
                beam = int(data.ppg.beam_on.mean)
                data_ne[
                    'Number of Beam On Dwelltimes'] = "%d dwelltimes" % beam
                key_order_ne.append('Number of Beam On Dwelltimes')
            except AttributeError:
                pass

            try:
                beam = int(data.ppg.beam_off.mean)
                data_ne[
                    'Number of Beam Off Dwelltimes'] = "%d dwelltimes" % beam
                key_order_ne.append('Number of Beam Off Dwelltimes')
            except AttributeError:
                pass

            try:
                rf = int(data.ppg.rf_on_delay.mean)
                data_ne['RF On Delay'] = "%d dwelltimes" % rf
                key_order_ne.append('RF On Delay')
            except AttributeError:
                pass

            try:
                rf = int(data.ppg.rf_on.mean)
                data_ne['RF On Duration'] = "%d dwelltimes" % rf
                key_order_ne.append('RF On Duration')
            except AttributeError:
                pass

            try:
                hel = bool(data.ppg.hel_enable.mean)
                data_ne['Flip Helicity'] = str(hel)
                key_order_ne.append('Flip Helicity')
            except AttributeError:
                pass

            try:
                hel = int(data.ppg.hel_sleep.mean)
                data_ne['Helicity Flip Sleep'] = "%d ms" % hel
                key_order_ne.append('Helicity Flip Sleep')
            except AttributeError:
                pass

            key_order_ne.append('')

            try:
                rf = bool(data.ppg.rf_enable.mean)
                data_ne['RF Enable'] = str(rf)
                key_order_ne.append('RF Enable')

                if rf:
                    freq = int(data.ppg.freq.mean)
                    data_ne['Frequency'] = "%d Hz" % freq
                    key_order_ne.append('Frequency')
            except AttributeError:
                pass

        # get 1F specific data
        elif data.mode == '1f':
            try:
                val = int(data.ppg.dwelltime.mean)
                data_ne['Bin Width'] = "%d ms" % val
                key_order_ne.append('Bin Width')
            except AttributeError:
                pass

            try:
                val = int(data.ppg.nbins.mean)
                data_ne['Number of Bins'] = "%d" % val
                key_order_ne.append('Number of Bins')
            except AttributeError:
                pass

            try:
                val = bool(data.ppg.const_t_btwn_cycl.mean)
                data_ne['Enable Const Time Between Cycles'] = str(val)
                key_order_ne.append('Enable Const Time Between Cycles')
            except AttributeError:
                pass

            try:
                val = int(data.ppg.freq_start.mean)
                data_ne['Frequency Scan Start'] = '%d Hz' % val
                key_order_ne.append('Frequency Scan Start')
            except AttributeError:
                pass

            try:
                val = int(data.ppg.freq_stop.mean)
                data_ne['Frequency Scan End'] = '%d Hz' % val
                key_order_ne.append('Frequency Scan End')
            except AttributeError:
                pass

            try:
                val = int(data.ppg.freq_incr.mean)
                data_ne['Frequency Scan Increment'] = '%d Hz' % val
                key_order_ne.append('Frequency Scan Increment')
            except AttributeError:
                pass

            try:
                val = bool(data.ppg.hel_enable.mean)
                data_ne['Flip Helicity'] = str(val)
                key_order_ne.append('Flip Helicity')
            except AttributeError:
                pass

            try:
                val = int(data.ppg.hel_sleep.mean)
                data_ne['Helicity Flip Sleep'] = "%d ms" % val
                key_order_ne.append('Helicity Flip Sleep')
            except AttributeError:
                pass

            try:
                val = int(data.ppg.ncycles.mean)
                data_ne['Number of Cycles per Scan Increment'] = '%d' % val
                key_order_ne.append('Number of Cycles per Scan Increment')
            except AttributeError:
                pass

        # get 1E specific data
        elif data.mode == '1e':
            try:
                val = int(data.ppg.dwelltime.mean)
                data_ne['Bin Width'] = "%d ms" % val
                key_order_ne.append('Bin Width')
            except AttributeError:
                pass

            try:
                val = int(data.ppg.nbins.mean)
                data_ne['Number of Bins'] = "%d" % val
                key_order_ne.append('Number of Bins')
            except AttributeError:
                pass

            try:
                val = int(data.ppg.field_start.mean)
                data_ne['Field Scan Start'] = '%d G' % val
                key_order_ne.append('Field Scan Start')
            except AttributeError:
                pass

            try:
                val = int(data.ppg.field_stop.mean)
                data_ne['Field Scan End'] = '%d G' % val
                key_order_ne.append('Field Scan End')
            except AttributeError:
                pass

            try:
                val = int(data.ppg.field_incr.mean)
                data_ne['Field Scan Increment'] = '%d G' % val
                key_order_ne.append('Field Scan Increment')
            except AttributeError:
                pass

            try:
                val = bool(data.ppg.hel_enable.mean)
                data_ne['Flip Helicity'] = str(val)
                key_order_ne.append('Flip Helicity')
            except AttributeError:
                pass

            try:
                val = int(data.ppg.hel_sleep.mean)
                data_ne['Helicity Flip Sleep'] = "%d ms" % val
                key_order_ne.append('Helicity Flip Sleep')
            except AttributeError:
                pass

            try:
                val = int(data.ppg.ncycles.mean)
                data_ne['Number of Cycles per Scan Increment'] = '%d' % val
                key_order_ne.append('Number of Cycles per Scan Increment')
            except AttributeError:
                pass

        # get 1W specific data
        elif data.mode == '1w':
            try:
                val = int(data.ppg.dwelltime.mean)
                data_ne['Bin Width'] = "%d ms" % val
                key_order_ne.append('Bin Width')
            except AttributeError:
                pass

            try:
                val = int(data.ppg.nbins.mean)
                data_ne['Number of Bins'] = "%d" % val
                key_order_ne.append('Number of Bins')
            except AttributeError:
                pass

            try:
                val = bool(data.ppg.const_t_btwn_cycl.mean)
                data_ne['Enable Const Time Between Cycles'] = str(val)
                key_order_ne.append('Enable Const Time Between Cycles')
            except AttributeError:
                pass

            try:
                val = int(data.ppg.service_t.mean)
                data_ne['DAQ Service Time'] = "%d ms" % val
                key_order_ne.append('DAQ Service Time')
            except AttributeError:
                pass

            try:
                val = int(data.ppg.xstart.mean)
                data_ne['Parameter x Start'] = '%d' % val
                key_order_ne.append('Parameter x Start')
            except AttributeError:
                pass

            try:
                val = int(data.ppg.xstop.mean)
                data_ne['Parameter x Stop'] = '%d' % val
                key_order_ne.append('Parameter x Stop')
            except AttributeError:
                pass

            try:
                val = int(data.ppg.xincr.mean)
                data_ne['Parameter x Increment'] = '%d' % val
                key_order_ne.append('Parameter x Increment')
            except AttributeError:
                pass

            try:
                val = int(data.ppg.yconst.mean)
                data_ne['Parameter y (constant)'] = '%d' % val
                key_order_ne.append('Parameter y (constant)')
            except AttributeError:
                pass

            try:
                val = str(data.ppg.freqfn_f1.units)
                data_ne['CH1 Frequency Function(x)'] = val
                key_order_ne.append('CH1 Frequency Function(x)')
            except AttributeError:
                pass

            try:
                val = str(data.ppg.freqfn_f2.units)
                data_ne['CH2 Frequency Function(x)'] = val
                key_order_ne.append('CH2 Frequency Function(x)')
            except AttributeError:
                pass

            try:
                val = str(data.ppg.freqfn_f3.units)
                data_ne['CH3 Frequency Function(x)'] = val
                key_order_ne.append('CH3 Frequency Function(x)')
            except AttributeError:
                pass

            try:
                val = str(data.ppg.freqfn_f4.units)
                data_ne['CH4 Frequency Function(x)'] = val
                key_order_ne.append('CH4 Frequency Function(x)')
            except AttributeError:
                pass

            try:
                val = bool(data.ppg.hel_enable.mean)
                data_ne['Flip Helicity'] = str(val)
                key_order_ne.append('Flip Helicity')
            except AttributeError:
                pass

            try:
                val = int(data.ppg.hel_sleep.mean)
                data_ne['Helicity Flip Sleep'] = "%d ms" % val
                key_order_ne.append('Helicity Flip Sleep')
            except AttributeError:
                pass

            try:
                val = int(data.ppg.ncycles.mean)
                data_ne['Number of Cycles per Scan Increment'] = '%d' % val
                key_order_ne.append('Number of Cycles per Scan Increment')
            except AttributeError:
                pass

            # each PPG variable is optional: a missing attribute on data.ppg
            # simply skips that display entry

            try:
                val = bool(data.ppg.fref_enable.mean)
                data_ne['Freq Reference Enabled'] = str(val)
                key_order_ne.append('Freq Reference Enabled')
            except AttributeError:
                pass

            try:
                val = int(data.ppg.fref_scale.mean)
                data_ne['Freq Reference Scale Factor'] = '%d' % val
                key_order_ne.append('Freq Reference Scale Factor')
            except AttributeError:
                pass

        # get Rb Cell specific data
        elif data.mode in ['1n']:

            try:
                dwell = int(data.ppg.dwelltime.mean)
                data_ne['Bin Width'] = "%d ms" % dwell
                key_order_ne.append('Bin Width')
            except AttributeError:
                pass

            # get mode
            try:
                custom_enable = bool(data.ppg.customv_enable.mean)
            except AttributeError:
                custom_enable = False

            # custom variable scan
            if custom_enable:

                try:
                    val = str(data.ppg.customv_name_write.units)
                    data_ne['EPICS variable name (for writing)'] = '%s' % val
                    key_order_ne.append('EPICS variable name (for writing)')
                except AttributeError:
                    pass

                try:
                    val = str(data.ppg.customv_name_read.units)
                    data_ne['EPICS variable name (for readback)'] = '%s' % val
                    key_order_ne.append('EPICS variable name (for readback)')
                except AttributeError:
                    pass

                try:
                    val = int(data.ppg.customv_scan_start.mean)
                    data_ne['Scan start value'] = '%d' % val
                    key_order_ne.append('Scan start value')
                except AttributeError:
                    pass

                try:
                    val = int(data.ppg.customv_scan_stop.mean)
                    data_ne['Scan stop value'] = '%d' % val
                    key_order_ne.append('Scan stop value')
                except AttributeError:
                    pass

                try:
                    val = int(data.ppg.customv_scan_incr.mean)
                    data_ne['Scan increment'] = '%d' % val
                    key_order_ne.append('Scan increment')
                except AttributeError:
                    pass

            # normal Rb cell scan
            else:
                try:
                    val = int(data.ppg.volt_start.mean)
                    data_ne['Start Rb Scan'] = '%d Volts' % val
                    key_order_ne.append('Start Rb Scan')
                except AttributeError:
                    pass

                try:
                    val = int(data.ppg.volt_stop.mean)
                    data_ne['Stop Rb Scan'] = '%d Volts' % val
                    key_order_ne.append('Stop Rb Scan')
                except AttributeError:
                    pass

                try:
                    val = int(data.ppg.volt_incr.mean)
                    data_ne['Scan Increment'] = '%d Volts' % val
                    key_order_ne.append('Scan Increment')
                except AttributeError:
                    pass

            try:
                val = int(data.ppg.nbins.mean)
                data_ne['Number of Bins'] = '%d' % val
                key_order_ne.append('Number of Bins')
            except AttributeError:
                pass

            try:
                val = bool(data.ppg.hel_enable.mean)
                data_ne['Flip Helicity'] = str(val)
                key_order_ne.append('Flip Helicity')
            except AttributeError:
                pass

            try:
                val = int(data.ppg.hel_sleep.mean)
                data_ne['Helicity Flip Sleep'] = "%d ms" % val
                key_order_ne.append('Helicity Flip Sleep')
            except AttributeError:
                pass

        # get 2e mode specific data
        elif data.mode in ['2e']:

            try:
                val = int(data.ppg.rf_on_ms.mean)
                data_ne['RF On Time'] = "%d ms" % val
                key_order_ne.append('RF On Time')
            except AttributeError:
                pass

            try:
                val = int(data.ppg.rf_on_delay.mean)
                data_ne['Number of RF On Delays'] = "%d" % val
                key_order_ne.append('Number of RF On Delays')
            except AttributeError:
                pass

            try:
                val = int(data.ppg.beam_off_ms.mean)
                data_ne['Beam Off Time'] = "%d ms" % val
                key_order_ne.append('Beam Off Time')
            except AttributeError:
                pass

            try:
                val = int(data.ppg.ndwell_post_on.mean)
                data_ne['Number of post RF BeamOn Dwelltimes'] = "%d" % val
                key_order_ne.append('Number of post RF BeamOn Dwelltimes')
            except AttributeError:
                pass

            try:
                val = int(data.ppg.ndwell_per_f.mean)
                data_ne['Number of Dwelltimes per Frequency'] = "%d" % val
                key_order_ne.append('Number of Dwelltimes per Frequency')
            except AttributeError:
                pass

            try:
                val = int(data.ppg.freq_start.mean)
                data_ne['Frequency Scan Start'] = "%d Hz" % val
                key_order_ne.append('Frequency Scan Start')
            except AttributeError:
                pass

            try:
                val = int(data.ppg.freq_stop.mean)
                data_ne['Frequency Scan Stop'] = "%d Hz" % val
                key_order_ne.append('Frequency Scan Stop')
            except AttributeError:
                pass

            try:
                val = int(data.ppg.freq_incr.mean)
                data_ne['Frequency Scan Increment'] = "%d Hz" % val
                key_order_ne.append('Frequency Scan Increment')
            except AttributeError:
                pass

            try:
                val = bool(data.ppg.rand_freq_val.mean)
                data_ne['Randomize Frequency Scan Increments'] = str(val)
                key_order_ne.append('Randomize Frequency Scan Increments')
            except AttributeError:
                pass

            try:
                val = bool(data.ppg.hel_enable.mean)
                data_ne['Flip Helicity'] = str(val)
                key_order_ne.append('Flip Helicity')
            except AttributeError:
                pass

            # NOTE(review): this reads hel_enable (a bool) but labels and
            # formats it as the helicity flip sleep time in ms -- likely
            # should read data.ppg.hel_sleep, as in the '1n' branch above;
            # confirm before changing
            try:
                val = bool(data.ppg.hel_enable.mean)
                data_ne['Helicity Flip Sleep'] = "%d ms" % val
                key_order_ne.append('Helicity Flip Sleep')
            except AttributeError:
                pass

            # NOTE(review): '' must exist as a key in data_ne or set_str
            # below raises KeyError -- presumably set elsewhere; verify
            key_order_ne.append('')

        # set viewer string

        def set_str(data_dict, key_order, txtbox):
            """Right-align keys and write 'key: value' lines to the textbox."""

            # column width: longest key plus one, but at least 5
            m = max(max(map(len, list(data_dict.keys()))) + 1, 5)
            s = '\n'.join(
                [k.rjust(m) + ': ' + data_dict[k] for k in key_order])
            self.set_textbox_text(txtbox, s)

        # one textbox per screen quadrant (nw, ne, sw, se)
        set_str(data_nw, key_order_nw, self.text_nw)
        set_str(data_ne, key_order_ne, self.text_ne)
        set_str(data_sw, key_order_sw, self.text_sw)
        set_str(data_se, key_order_se, self.text_se)

        return True
Example #9
0
    def __init__(self,
                 runs,
                 years,
                 fn,
                 sharelist,
                 npar=-1,
                 xlims=None,
                 rebin=1,
                 asym_mode='c',
                 fixed=None):
        """
            Fetch asymmetry data for each run and set up the global fitter.

            runs:       list of run numbers
            
            years:      list of years corresponding to run numbers, or int which applies to all
            
            fn:         list of function handles to fit (or single which applies to all)
                        must specify inputs explicitly (do not do def fn(*par)!)
                        must have len(fn) = len(runs) if list
                        
            sharelist:  list of bool to indicate which parameters are shared. 
                        True if shared
                        len = number of parameters.
                        
            npar:       number of free parameters in each fitting function.
                        Set if number of parameters is not intuitable from 
                            function code.            
            
            xlims:      list of 2-tuples for (low,high) bounds on fitting range 
                            based on x values. If list is not depth 2, use this 
                            range on all runs.
            
            rebin:      rebinning factor on fitting and drawing data
            
            fixed:      list of booleans indicating if the parameter is to be 
                        fixed to p0 value (same length as p0). Returns best 
                        parameters in order presented, with the fixed 
                        parameters omitted.
            
            asym_mode:  asymmetry type to calculate and fit (combined helicities only)
        """

        # collections.Iterable was removed in Python 3.10; use collections.abc
        from collections.abc import Iterable

        # Set years: a single int applies to every run
        if not isinstance(years, Iterable):
            years = [years] * len(runs)

        # Set rebin: a single int applies to every run
        if not isinstance(rebin, Iterable):
            rebin = [rebin] * len(runs)

        # Get asymmetry (x, y, dy) for each run
        data = [
            bdata(r, year=y).asym(asym_mode, rebin=re)
            for r, y, re in zip(runs, years, rebin)
        ]

        # split into x,y,dy data sets
        # NOTE(review): np.array on runs of unequal length builds a ragged
        # (object-dtype) array, which newer numpy versions reject -- confirm
        # all runs are expected to have equal length here
        x = np.array([d[0] for d in data])
        y = np.array([d[1] for d in data])
        dy = np.array([d[2] for d in data])

        # select subrange
        if xlims is not None:

            # check depth: a single (low, high) pair applies to all runs
            if len(np.array(xlims).shape) < 2:
                xlims = [xlims for i in range(len(x))]

            # initialize new inputs
            xnew = []
            ynew = []
            dynew = []

            # select subrange for each run independently
            for i, xl in enumerate(xlims):
                tag = (xl[0] < x[i]) * (x[i] < xl[1])
                xnew.append(x[i][tag])
                ynew.append(y[i][tag])
                dynew.append(dy[i][tag])

            # new arrays
            x = np.array(xnew)
            y = np.array(ynew)
            dy = np.array(dynew)

        # initialize the generic global fitter with the prepared data
        super(global_bdata_fitter, self).__init__(x, y, dy, fn, sharelist,
                                                  npar, fixed)
Example #10
0
    def get_data(self):
        """
            Split data into parts, and assign to dictionary.

            Fetches runs listed in the run-number entry box, merges runs where
            requested, filters to a single (or equivalent) run mode, and
            populates self.bfit.data and the fetch-tab data lines.
        """

        self.logger.debug('Fetching runs')

        # make list of run numbers, replace possible delimiters
        try:
            run_numbers = self.string2run(self.run.get())
        except ValueError:
            self.logger.exception('Bad run number string')
            raise ValueError('Bad run number string')

        # get list of merged runs
        merged_runs = self.get_merged_runs(self.run.get())

        # get the selected year
        year = int(self.year.get())

        # get data; accumulate failure messages in s
        all_data = {}
        s = ['Failed to open run']
        for r in run_numbers:

            # read from archive
            try:
                all_data[r] = bdata(r, year=year)
            except (RuntimeError, ValueError):
                s.append("%d (%d)" % (r, year))

        # print error message (s has more than the header line)
        if len(s) > 1:
            s = '\n'.join(s)
            print(s)
            self.logger.warning(s)
            messagebox.showinfo(message=s)

        # merge runs
        new_data = []
        for merge in merged_runs:

            # collect data; pop so merged runs are not also kept individually
            dat_to_merge = []
            for r in merge:
                dat_to_merge.append(all_data.pop(r))

            # make bmerged object
            new_data.append(bmerged(dat_to_merge))

        new_data.extend(list(all_data.values()))

        # update object data lists
        data = {}
        for new_dat in new_data:

            # get key for data storage
            runkey = self.bfit.get_run_key(new_dat)

            # update data
            # NOTE(review): the freshly fetched object new_dat is discarded
            # here and the existing entry is re-read from the archive instead
            # -- confirm this is intended
            if runkey in self.bfit.data.keys():
                self.bfit.data[runkey].read()

            # new data
            else:
                data[runkey] = fitdata(self.bfit, new_dat)

        # check that data is all the same runtype
        # NOTE(review): run_types[0] below raises IndexError if no runs were
        # loaded; the try/except IndexError further down cannot catch that
        run_types = [d.mode for d in self.bfit.data.values()]
        run_types = run_types + [d.mode for d in data.values()]

        # check for equivalent run modes
        selected_mode = [run_types[0]]
        for equiv in self.equivalent_modes:
            if selected_mode[0] in equiv:
                selected_mode = equiv
                break

        # different run types: select all runs of same type
        if not all([r in selected_mode for r in run_types]):

            # unique run modes
            run_type_unique = np.unique(run_types)

            # message for multiple run modes
            message = "Multiple run types detected:\n("
            message += ', '.join(run_type_unique)
            message += ')\n\nSelecting ' + ' and '.join(
                selected_mode) + ' runs.'
            messagebox.showinfo(message=message)

        # get only run_types[0]
        self.logger.debug('Fetching runs of mode %s', selected_mode)
        for k in tuple(data.keys()):
            if data[k].mode in selected_mode:
                self.bfit.data[k] = data[k]
            else:
                del data[k]

        # get runmode
        try:
            self.runmode = selected_mode
        except IndexError:
            s = 'No valid runs detected.'
            messagebox.showerror(message=s)
            self.logger.warning(s)
            raise RuntimeError(s)

        self.runmode_label['text'] = self.runmode_relabel[self.runmode[0]]

        # get area: unique detector areas of all fetched runs, concatenated
        area = [d.area for d in self.bfit.data.values()]
        area = ''.join(np.unique(area))

        # set asym type comboboxes on both the fetch and fit tabs
        self.bfit.set_asym_calc_mode_box(self.runmode[0], self, area)
        self.bfit.set_asym_calc_mode_box(self.runmode[0], self.bfit.fit_files,
                                         area)

        keys_list = list(self.bfit.data.keys())
        keys_list.sort()

        # make lines: one dataline widget per run, in sorted key order
        n = 1
        for r in keys_list:

            # new line
            if r not in self.data_lines.keys():

                # revive a previously removed line if one exists
                if r in self.data_lines_old.keys():
                    self.data_lines[r] = self.data_lines_old[r]
                    self.data_lines[r].set_label()
                    del self.data_lines_old[r]
                else:
                    self.data_lines[r] = dataline(\
                                            bfit = self.bfit, \
                                            lines_list = self.data_lines, \
                                            lines_list_old = self.data_lines_old,
                                            fetch_tab_frame = self.dataline_frame, \
                                            bdfit = self.bfit.data[r], \
                                            row = n)
            self.data_lines[r].grid(n)
            n += 1

        # remove old runs, modes not selected
        for r in tuple(self.data_lines.keys()):
            if self.data_lines[r].bdfit.mode not in self.runmode:
                self.data_lines[r].degrid()

        # set nbm variable
        self.set_nbm()

        self.logger.info('Fetched runs %s', list(data.keys()))
Example #11
0
    def draw(self, *args):
        """
            Draw deadtime corrected data.

            Plots split-helicity asymmetry with and without the deadtime
            correction (self.dt, in ns) and negative-helicity scaling
            (self.c), then the mean-subtracted helicity average for both.
        """

        # get data
        try:
            data = bd.bdata(self.run.get(), self.year.get())
        except bd.InputError as msg:
            messagebox.showerror('Bad run input', str(msg))
            raise msg

        asym = data.asym('hel')
        asym_dt = data.asym('hel', deadtime=self.dt * 1e-9)  # dt stored in ns

        # scale negative-helicity value and error by c
        asym_dt['n'] = list(asym_dt['n'])
        asym_dt['n'][0] *= self.c
        asym_dt['n'][1] *= self.c

        # draw split helicity ------------------------------------------------
        plt.figure()
        plt.errorbar(asym['time_s'],
                     *asym['p'],
                     fmt='.C0',
                     zorder=0,
                     label='Uncorrected')
        plt.errorbar(asym['time_s'], *asym['n'], fmt='.C0', zorder=0)
        plt.errorbar(asym_dt['time_s'],
                     *asym_dt['p'],
                     fmt='.C3',
                     zorder=5,
                     alpha=0.4,
                     label='Corrected')
        plt.errorbar(asym_dt['time_s'],
                     *asym_dt['n'],
                     fmt='.C3',
                     zorder=5,
                     alpha=0.4)

        # plot elements
        plt.ylabel('Asymmetry')
        plt.xlabel('Time (s)')
        plt.title('Run %d.%d\nDeadtime correction of %.3f ns' % \
            (self.year.get(), self.run.get(), self.dt) +\
            '\nNeg Helicity Scaling of %.3f' % self.c,
            fontsize='x-small')
        plt.legend(fontsize='small')
        plt.tight_layout()

        # draw helicity difference -------------------------------------------
        # helicity average and its propagated error
        dasym = 0.5 * (asym['p'][0] + asym['n'][0])
        ddasym = 0.5 * (asym['p'][1]**2 + asym['n'][1]**2)**0.5

        # subtract the mean; add point error and error of the mean in
        # quadrature (bug fix: previously used dasym**2 ... **2, leaving
        # ddasym unused and producing nonsense error bars)
        dasym_sub = dasym - np.mean(dasym)
        ddasym_sub = (ddasym**2 + np.std(dasym)**2 / len(dasym))**0.5

        # same for the deadtime-corrected data
        dasym_dt = 0.5 * (asym_dt['p'][0] + asym_dt['n'][0])
        ddasym_dt = 0.5 * (asym_dt['p'][1]**2 + asym_dt['n'][1]**2)**0.5

        dasym_dt_sub = dasym_dt - np.mean(dasym_dt)
        ddasym_dt_sub = (ddasym_dt**2 + np.std(dasym_dt)**2 / len(dasym_dt))**0.5

        plt.figure()

        plt.errorbar(asym['time_s'],
                     dasym_sub,
                     ddasym_sub,
                     fmt='.C0',
                     zorder=0,
                     label='Uncorrected')
        plt.errorbar(asym_dt['time_s'],
                     dasym_dt_sub,
                     ddasym_dt_sub,
                     fmt='.C3',
                     zorder=5,
                     alpha=0.4,
                     label='Corrected')

        plt.axhline(0, ls='-', color='k', zorder=10)

        # plot elements
        plt.ylabel(r'$\frac{1}{2}(\mathcal{A}_+ + c\mathcal{A}_-) - ' +\
                     r'\frac{1}{2} \overline{(\mathcal{A}_+ + c\mathcal{A}_-)}$',
                     fontsize='small')
        plt.xlabel('Time (s)')
        plt.title('Run %d.%d\nDeadtime correction of %.3f ns' % \
            (self.year.get(), self.run.get(), self.dt) +\
            '\nNeg Helicity Scaling: c = %.3f' % self.c,
            fontsize='x-small')
        plt.legend(fontsize='small')
        plt.tight_layout()
Example #12
0
# year of runs to survey
year = 2018

# locate all BNMR run files for that year
files = glob.glob(os.environ['BNMR_ARCHIVE'] + '/%d/*.msr' % year)

# extract the run number from each file name (e.g. 040123.msr -> 40123)
runs = [int(os.path.splitext(os.path.basename(f))[0]) for f in files]

# accumulate run duration and total histogram counts for mode-20 runs
duration = []
counts = []
hist = ['B+', 'B-', 'F+', 'F-']
for r in tqdm.tqdm(runs, total=len(files)):

    # skip runs numbered 40000 or below
    if r <= 40000:
        continue

    b = bd.bdata(r, year)

    # keep only SLR (mode 20) runs
    if b.mode == '20':
        duration.append(b.duration)
        counts.append(sum(sum(b.hist[h].data) for h in hist))

# histogram the data: duration in 60 s bins, counts in 1e7 bins
h_dur, b_dur = np.histogram(duration, bins=np.arange(0, 3600, 60))
h_cnt, b_cnt = np.histogram(counts, bins=np.arange(1e7, 1e9, 1e7))

# convert bin edges to bin centers
b_dur = (b_dur[1:] + b_dur[:-1]) / 2
b_cnt = (b_cnt[1:] + b_cnt[:-1]) / 2
Example #13
0
# basic dev testing the ilt 
# Derek Fujimoto
# Feb 2020

from bILT.src.ilt import *
from bfit.fitting.functions import pulsed_exp 
import bdata as bd
import numpy as np

# combined-helicity asymmetry of a reference run
x,y,dy = bd.bdata(40214, year=2009).asym('c') 
# T1 grid and regularization parameters to scan
# NOTE(review): np.logspace interprets its endpoints as base-10 exponents,
# but np.log gives natural-log exponents -- confirm np.log10 was not intended
# (as written, the grid spans roughly 3.8e-5 to 6.3e4 rather than 0.012 to 121)
T1 = np.logspace(np.log(0.01 * 1.2096), np.log(100.0 * 1.2096), 100)
alpha = np.logspace(2, 5, 50)

# pulsed exponential (lifetime 1.21 s, pulse 4 s) with fixed unit amplitude
f = pulsed_exp(1.21,4)
fn = lambda x,w: f(x,w,1) 
# run the inverse Laplace transform fit over the alpha scan (4 processes)
I = ilt(x,y,dy,fn,T1,nproc=4)
I.fit(alpha)

# ~ plt.figure()
# ~ I.draw_fit(10)

# ~ plt.figure()
# ~ I.draw_weights(10)

# ~ plt.figure()
# ~ I.draw_logdist(10)

# ~ plt.figure()
# ~ I.draw_Lcurve()

# ~ plt.figure()
Example #14
0
    def get_data(self):
        """
            Split data into parts, and assign to dictionary.

            Fetches runs listed in the run-number entry box, filters to a
            single run mode, and populates self.bfit.data and the fetch-tab
            data lines.
        """

        self.logger.debug('Fetching runs')

        # make list of run numbers, replace possible delimiters
        # NOTE(review): a bad run string logs and returns None silently here,
        # unlike the sibling implementation which re-raises -- confirm intent
        try:
            run_numbers = self.string2run(self.run.get())
        except ValueError:
            self.logger.exception('Bad run number string')
            return

        # get the selected year
        year = int(self.year.get())

        # get data; accumulate failure messages in s
        data = {}
        s = ['Failed to open run']
        for r in run_numbers:

            # get key for data storage
            runkey = self.bfit.get_run_key(r=r, y=year)

            # read from archive
            try:
                new_data = bdata(r, year=year)
            except (RuntimeError, ValueError):
                s.append("%d (%d)" % (r, year))
            else:

                # update data: replace the underlying bdata on existing entries
                if runkey in self.bfit.data.keys():
                    self.bfit.data[runkey].bd = new_data

                # new data
                else:
                    data[runkey] = fitdata(self.bfit, new_data)

        # print error message (s has more than the header line)
        if len(s) > 1:
            s = '\n'.join(s)
            print(s)
            self.logger.warning(s)
            messagebox.showinfo(message=s)

        # check that data is all the same runtype
        # NOTE(review): run_types[0] below raises IndexError if no runs were
        # loaded; the try/except IndexError further down cannot catch that
        run_types = [self.bfit.data[k].mode for k in self.bfit.data.keys()]
        run_types = run_types + [data[k].mode for k in data.keys()]

        # different run types: select all runs of same type
        if not all([r == run_types[0] for r in run_types]):

            # unique run modes
            run_type_unique = np.unique(run_types)

            # message
            message = "Multiple run types detected:\n("
            for m in run_type_unique:
                message += m + ', '
            message = message[:-2]
            message += ')\n\nSelecting ' + run_types[0] + ' runs.'
            messagebox.showinfo(message=message)

        # get only run_types[0]
        self.logger.debug('Fetching runs of mode %s', run_types[0])
        for k in tuple(data.keys()):
            if data[k].mode == run_types[0]:
                self.bfit.data[k] = data[k]
            else:
                del data[k]

        try:
            self.runmode = run_types[0]
        except IndexError:
            s = 'No valid runs detected.'
            messagebox.showerror(message=s)
            self.logger.warning(s)
            raise RuntimeError(s)
        self.runmode_label['text'] = self.runmode_relabel[self.runmode]
        self.bfit.set_asym_calc_mode_box(self.runmode, self)
        self.bfit.set_asym_calc_mode_box(self.runmode, self.bfit.fit_files)

        keys_list = list(self.bfit.data.keys())
        keys_list.sort()

        # make lines: one dataline widget per run, in sorted key order
        n = 1
        for r in keys_list:

            # new line
            if r not in self.data_lines.keys():

                # revive a previously removed line if one exists
                if r in self.data_lines_old.keys():
                    self.data_lines[r] = self.data_lines_old[r]
                    self.data_lines[r].set_label()
                    del self.data_lines_old[r]
                else:
                    self.data_lines[r] = dataline(\
                                            bfit = self.bfit,\
                                            lines_list = self.data_lines,\
                                            lines_list_old = self.data_lines_old,
                                            fetch_tab_frame = self.dataline_frame,\
                                            bdfit = self.bfit.data[r],\
                                            row = n)
            self.data_lines[r].grid(n)
            n += 1

        # remove old runs, modes not selected
        for r in tuple(self.data_lines.keys()):
            if self.data_lines[r].bdfit.mode != self.runmode:
                self.data_lines[r].degrid()

        self.logger.info('Fetched runs %s', list(data.keys()))
Example #15
0
def fit_single(run,
               year,
               fn,
               omit='',
               rebin=1,
               hist_select='',
               xlim=None,
               asym_mode='c',
               fixed=None,
               **kwargs):
    """
        Fit combined asymmetry from bdata.
    
        run:            run number
        
        year:           year
        
        fn:             function handle to fit
        
        omit:           string of space-separated bin ranges to omit
        rebin:          rebinning of data prior to fitting. 
        
        hist_select:    string for selecting histograms to use in asym calc
        
        xlim:           2-tuple for (low,high) bounds on fitting range based on 
                            x values
        
        asym_mode:      input for asymmetry calculation type 
                            c: combined helicity
                            h: split helicity
                            
                        For 2e mode, prefix with:
                            sl_: combined timebins using slopes
                            dif_: combined timebins using differences
                            raw_: raw time-resolved
                            
                            ex: sl_c or raw_h or dif_c
        
        fixed:          list of booleans indicating if the parameter is to be 
                        fixed to p0 value (same length as p0). Returns best 
                        parameters in order presented, with the fixed 
                        parameters omitted.
        
        kwargs:         keyword arguments for curve_fit. See curve_fit docs. 
        
        Returns: (par,cov,chi)
            par: best fit parameters
            cov: covariance matrix
            chi: reduced chisquared of fit
    """

    # Get data input
    # NOTE(review): hist_select is accepted and documented above but never
    # forwarded to _get_asym -- confirm whether it should be passed through
    data = bdata(run, year)
    x, y, dy = _get_asym(data, asym_mode, rebin=rebin, omit=omit)

    # check for values with error == 0. Omit these values.
    tag = dy != 0
    x = x[tag]
    y = y[tag]
    dy = dy[tag]

    # apply xlimits
    if xlim is not None:
        tag = (xlim[0] < x) * (x < xlim[1])
        x = x[tag]
        y = y[tag]
        dy = dy[tag]

    # p0: default to all ones, one per function parameter after x
    if 'p0' not in kwargs:
        kwargs['p0'] = np.ones(fn.__code__.co_argcount - 1)

    # fixed parameters
    did_fixed = False
    if fixed is not None and any(fixed):

        # save stuff for inflation back to the full parameter set afterwards
        did_fixed = True
        p0 = np.copy(kwargs['p0'])
        npar = len(p0)

        # dumb case: all values fixed: nothing to fit, dof = len(y)
        if all(fixed):
            cov = np.full((npar, npar), np.nan)
            chi = np.sum(np.square((y - fn(x, *p0)) / dy)) / len(y)
            return (p0, cov, chi)

        # prep inputs
        fixed = np.asarray(fixed)
        if 'bounds' in kwargs: bounds = kwargs['bounds']
        else: bounds = None

        # get fixed version: wraps fn so only the free parameters are fitted
        fn, kwargs['p0'], bounds = _get_fixed_values(fixed, fn, kwargs['p0'],
                                                     bounds)

        # modify fitting inputs
        if bounds is not None: kwargs['bounds'] = bounds

    # Fit the function
    par, cov = curve_fit(fn, x, y, sigma=dy, absolute_sigma=True, **kwargs)
    dof = len(y) - len(kwargs['p0'])

    # get chisquared (reduced)
    chi = np.sum(np.square((y - fn(x, *par)) / dy)) / dof

    # inflate parameters with fixed values
    if did_fixed:

        # inflate parameters: fixed slots take p0, free slots take the fit
        par_inflated = np.zeros(npar)
        par_inflated[fixed] = p0[fixed]
        par_inflated[~fixed] = par
        par = par_inflated

        # inflate cov matrix with NaN in rows/columns of fixed parameters
        nfixed_flat = np.concatenate(np.outer(~fixed, ~fixed))
        c_inflated = np.full(npar**2, np.nan)
        c_inflated[nfixed_flat] = np.concatenate(cov)
        cov = c_inflated.reshape(npar, -1)

    return (par, cov, chi)
Example #16
0
def fit_list(runs,
             years,
             fnlist,
             omit=None,
             rebin=None,
             sharelist=None,
             npar=-1,
             hist_select='',
             xlims=None,
             asym_mode='c',
             fixed=None,
             **kwargs):
    """
        Fit combined asymmetry from bdata.
    
        runs:           list of run numbers
        
        years:          list of years corresponding to run numbers, or int which applies to all
        
        fnlist:         list of function handles to fit (or single which applies to all)
                        must specify inputs explicitly (do not do def fn(*par)!)
                        must have len(fn) = len(runs) if list
        
        omit:           list of strings of space-separated bin ranges to omit
        rebin:          list of rebinning of data prior to fitting. 
        
        sharelist:      list of bool to indicate which parameters are shared. 
                        True if shared
                        len = number of parameters.
        
        npar:           number of free parameters in each fitting function.
                        Set if number of parameters is not intuitable from 
                            function code.      
        
        hist_select:    string for selecting histograms to use in asym calc
        
        xlims:          list of 2-tuple for (low,high) bounds on fitting range 
                            based on x values
        
        asym_mode:      input for asymmetry calculation type 
                            c: combined helicity
                            h: split helicity
                            
                        For 2e mode, prefix with:
                            sl_: combined timebins using slopes
                            dif_: combined timebins using differences
                            raw_: raw time-resolved 
                            
                            ex: sl_c or raw_h or dif_c
        
        fixed:          list of booleans indicating if the parameter is to be 
                        fixed to p0 value (same length as p0). Returns best 
                        parameters in order presented, with the fixed 
                        parameters omitted. Can be a list of lists with one list 
                        for each run.
        
        kwargs:         keyword arguments for curve_fit. See curve_fit docs. 
        
        Returns: (par, cov, chi, gchi)
            par:  best fit parameters, one array per run
            cov:  covariance matrices, one per run
            chi:  reduced chisquared of each individual fit
            gchi: global reduced chisquared over all fits
    """

    nruns = len(runs)

    # wrap a single function handle in a list
    # NOTE: collections.Iterable was removed in python 3.10; use collections.abc
    if not isinstance(fnlist, collections.abc.Iterable):
        fnlist = [fnlist]

    # copy so extending below cannot mutate the caller's list
    fnlist = list(fnlist)

    # infer the number of free parameters from the first function's signature
    if npar < 0:
        npar = fnlist[0].__code__.co_argcount - 1

    # pad fnlist so there is one function per run (repeat the last entry)
    fnlist.extend([fnlist[-1] for i in range(nruns - len(fnlist))])

    # default: no parameters shared
    if sharelist is None:
        sharelist = np.zeros(npar, dtype=bool)

    # pad omit with empty strings, one entry per run
    if omit is None:
        omit = [''] * nruns
    elif len(omit) < nruns:
        # BUGFIX: np.concatenate takes a single sequence of arrays; passing the
        # padding as a second positional argument set the axis instead
        omit = np.concatenate((omit, [''] * (nruns - len(omit))))

    # pad rebin with ones, one entry per run
    if rebin is None:
        rebin = np.ones(nruns)
    elif type(rebin) is int:
        rebin = np.ones(nruns) * rebin
    elif len(rebin) < nruns:
        rebin = np.concatenate((rebin, np.ones(nruns - len(rebin))))

    rebin = np.asarray(rebin).astype(int)

    # broadcast a scalar year to all runs
    if type(years) in (int, float):
        years = np.ones(nruns, dtype=int) * years

    # get p0 list, defaulting to all-ones for each run
    p0 = kwargs.pop('p0', [np.ones(npar)] * nruns)

    # fit globally -----------------------------------------------------------
    if any(sharelist) and len(runs) > 1:
        print('Running shared parameter fitting...')
        g = global_bdata_fitter(runs,
                                years,
                                fnlist,
                                sharelist,
                                npar,
                                xlims,
                                asym_mode=asym_mode,
                                rebin=rebin,
                                fixed=fixed)
        g.fit(p0=p0, **kwargs)
        gchi, chis = g.get_chi()  # returns (global chi, individual chis)
        pars, covs = g.get_par()

    # fit runs individually --------------------------------------------------
    else:

        # get bounds, defaulting to unbounded for each run
        bounds = kwargs.pop('bounds', [(-np.inf, np.inf)] * nruns)

        # broadcast a single p0 to all runs
        if len(np.asarray(p0).shape) < 2:
            p0 = [p0] * nruns

        # check xlims shape - should match number of runs
        if xlims is None:
            xlims = [None] * nruns
        elif len(np.asarray(xlims).shape) < 2:
            xlims = [xlims for i in range(nruns)]
        else:
            xlims = list(xlims)
            xlims.extend([xlims[-1] for i in range(len(runs) - len(xlims))])

        # broadcast a single fixed list to all runs; default all free
        if fixed is not None:
            fixed = np.asarray(fixed)
            if len(fixed.shape) < 2:
                fixed = [fixed] * nruns
        else:
            fixed = [[False] * npar] * nruns

        pars = []
        covs = []
        chis = []
        gchi = 0.
        dof = 0.

        iter_obj = tqdm(zip(runs, years, fnlist, omit, rebin, p0, bounds,
                            xlims, fixed),
                        total=len(runs),
                        desc='Independent Fitting')
        for r, yr, fn, om, re, p, b, xl, fix in iter_obj:

            # get data for chisq calculations
            x, y, dy = _get_asym(bdata(r, year=yr),
                                 asym_mode,
                                 rebin=re,
                                 omit=om)

            # interpret missing x limits as unbounded
            if xl is None:
                xl = [-np.inf, np.inf]
            else:
                if xl[0] is None: xl[0] = -np.inf
                if xl[1] is None: xl[1] = np.inf

            # keep in-range data with nonzero errors (avoid division by zero)
            idx = (xl[0] < x) * (x < xl[1]) * (dy != 0)
            x = x[idx]
            y = y[idx]
            dy = dy[idx]

            # trivial case: all parameters fixed, nothing to fit
            if all(fix):
                lenp = len(p)
                s = np.full((lenp, lenp), np.nan)
                c = np.sum(np.square((y - fn(x, *p)) / dy)) / len(y)

            # fit with free parameters
            else:
                p, s, c = fit_single(r,
                                     yr,
                                     fn,
                                     om,
                                     re,
                                     hist_select,
                                     p0=p,
                                     bounds=b,
                                     xlim=xl,
                                     asym_mode=asym_mode,
                                     fixed=fix,
                                     **kwargs)
            # outputs
            pars.append(p)
            covs.append(s)
            chis.append(c)

            # accumulate global chisquared and degrees of freedom
            gchi += np.sum(np.square((y - fn(x, *p)) / dy))
            dof += len(x) - len(p)
        gchi /= dof

    pars = np.asarray(pars)
    covs = np.asarray(covs)
    chis = np.asarray(chis)

    return (pars, covs, chis, gchi)