Example #1
 def _read_mdict(self, fh, get_n, attr_dict, attr_name, obj_class):
     """
         Read all objects of a given type from the MUD file, set their
         attributes, and place them in an mdict.
         
         fh:         file header
         get_n:      mudpy function which gets the number of objects in the file
         attr_dict:  dictionary which links attribute name and mudpy function
         attr_name:  main attribute name. Ex: hist, scaler, or ivar
         obj_class:  object class to make
     """
     try:
         n = get_n(fh)[1]
     except RuntimeError:
         # no objects of this type in the file: leave the attribute unset
         pass
     else:
         setattr(self, attr_name, mdict())
         for i in range(1, n+1):
             
             obj = obj_class()
             obj.id_number = i
             
             for attr, func_name in attr_dict.items():
                 func = getattr(mud, 'get_'+func_name)
                 try:
                     setattr(obj, attr, func(fh, i))
                 except RuntimeError:
                     # this attribute is not present in the file: skip it
                     pass
             getattr(self, attr_name)[obj.title] = obj
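The function above is driven entirely by the attr_dict mapping: each attribute name is paired with the suffix of a mud.get_* getter, and getattr/setattr do the dispatch, with the object's title used as the dictionary key. Below is a minimal self-contained sketch of that pattern; the fake_mud stand-in, the FakeHist class, and the returned values are invented for illustration and are not part of mudpy.

import types

# stand-ins for the mudpy getters; the real ones are C wrappers taking (fh, id)
fake_mud = types.SimpleNamespace(
    get_hist_title=lambda fh, i: 'F+%d' % i,
    get_hist_n_bins=lambda fh, i: 400,
)

class FakeHist:
    # minimal stand-in for the histogram object class built by _read_mdict
    pass

def read_objects(fh, n, attr_dict, obj_class, module):
    # same dispatch idea as _read_mdict: look up 'get_' + name on the module
    out = {}
    for i in range(1, n + 1):
        obj = obj_class()
        obj.id_number = i
        for attr, func_name in attr_dict.items():
            func = getattr(module, 'get_' + func_name)
            setattr(obj, attr, func(fh, i))
        out[obj.title] = obj        # keyed by title, so 'title' must be in attr_dict
    return out

hists = read_objects(fh=None, n=2,
                     attr_dict={'title': 'hist_title', 'n_bins': 'hist_n_bins'},
                     obj_class=FakeHist, module=fake_mud)
print(sorted(hists))         # ['F+1', 'F+2']
print(hists['F+1'].n_bins)   # 400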
Example #2
    def __init__(self, bdata_list):
        """
            bdata_list:               list of bdata objects
        """

        # sort by run number
        runs = [b.run for b in bdata_list]
        idx = np.argsort(runs)
        bdata_list = np.array(bdata_list)[idx]
        runs = np.array(runs)[idx]
        years = np.array([b.year for b in bdata_list])

        # set some common parameters
        for key in ('apparatus', 'area', 'das', 'description', 'duration',
                    'end_date', 'end_time', 'exp', 'experimenter', 'lab',
                    'method', 'mode', 'orientation', 'sample', 'start_date',
                    'start_time', 'title'):

            x = np.array([getattr(b, key) for b in bdata_list])
            setattr(self, key, self._combine_values(key, x))

        # set the run number and year
        self.run = int(''.join(map(str, runs)))
        self.year = int(''.join(map(str, years)))

        # set ppg, camp, and epics
        for top in ('ppg', 'epics', 'camp'):

            d = mdict()

            keys = list(getattr(bdata_list[0], top).keys())
            x = mlist([getattr(b, top) for b in bdata_list])
            for key in keys:
                d[key] = self._combine_var(x[key])

            setattr(self, top, d)

        # combine the histograms
        self._combine_hist(bdata_list)

        # checks
        if '2' in self.mode:
            dwelltime = np.array([b.ppg.dwelltime.mean for b in bdata_list])
            beam_off = np.array([b.ppg.beam_off.mean for b in bdata_list])
            beam_on = np.array([b.ppg.beam_on.mean for b in bdata_list])

            if any(dwelltime[0] != dwelltime) or any(beam_off[0] != beam_off) \
                or any(beam_on[0] != beam_on):
                raise RuntimeError(
                    '%s run has varying ppg parameters and dwelltimes. '
                    'Cannot combine histograms.' % self.mode)
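The constructor starts by building a single sort permutation from the run numbers with np.argsort and applying it to every parallel array (the object list, the run numbers, the years), so all per-run quantities stay aligned. A standalone sketch of that step, with made-up run numbers and placeholder strings in place of bdata objects:

import numpy as np

runs  = [40124, 40033, 40219]
years = [2020, 2020, 2021]
data  = ['b_40124', 'b_40033', 'b_40219']   # stand-ins for bdata objects

idx   = np.argsort(runs)        # one permutation...
runs  = np.array(runs)[idx]     # ...applied to every parallel array
years = np.array(years)[idx]
data  = np.array(data)[idx]

print(runs)   # [40033 40124 40219]
print(data)   # ['b_40033' 'b_40124' 'b_40219']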
Example #3
 def __init__(self, filename=''):
     """
         Constructor. Reads the file, or sets up an empty object for writing.
         
         filename: string, path to file to read. If blank, make empty object.
     """
     
     # read
     if filename:
         self._read_file(filename)
     
     # set up for writing
     else:
         for attr in self.default_attributes:
             setattr(self, attr, None)
         for attr in ('hist', 'sclr', 'ivar'):
             setattr(self, attr, mdict())
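This constructor is a read-or-blank pattern: given a filename it defers to _read_file, otherwise it pre-creates every expected attribute so the object can be filled in by hand and written out later. A toy illustration of the same pattern; the ToyFile class, its attribute names, and the plain dicts standing in for mdict are invented for this sketch:

class ToyFile:

    default_attributes = ('title', 'run_number', 'start_time')

    def __init__(self, filename=''):

        # read
        if filename:
            self._read_file(filename)

        # set up a blank object: every attribute exists, ready to be assigned
        else:
            for attr in self.default_attributes:
                setattr(self, attr, None)
            for attr in ('hist', 'sclr', 'ivar'):
                setattr(self, attr, {})

    def _read_file(self, filename):
        raise NotImplementedError('reading is outside the scope of this sketch')

f = ToyFile()
f.title = 'test run'
print(f.title, f.run_number, f.hist)   # test run None {}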
Example #4
    def _combine_hist(self, bdata_list):
        """
            Combine the histograms of the base runs and set the result at the top level.
            
            Scans are concatenated. 
            SLR histograms are summed.
        """

        hist = mlist([b.hist for b in bdata_list])

        # these will get combined
        hist_names = ('F+', 'F-', 'B+', 'B-', 'L+', 'L-', 'R+', 'R-', 'NBMF+',
                      'NBMF-', 'NBMB+', 'NBMB-')

        # if any of these x histograms is present, scans are appended rather than summed
        hist_xnames = (
            'Frequency',
            'x parameter',
        )

        # make container
        hist_joined = mdict()

        # check if x values in histogram
        do_append = any([h in hist[0] for h in hist_xnames])

        # get x histogram name
        if do_append:
            for xname in hist_xnames:
                if xname in hist[0]:
                    break

        for name in hist[0].keys():

            # no rule for combining histogram
            if (name not in hist_names) and (name not in hist_xnames): continue

            # make the object
            hist_obj = mhist()

            # combine scan-less runs (just add the histograms)
            if not do_append:
                hist_obj.data = np.sum(list(hist[name].data), axis=0)

            # combine runs with scans (append the data)
            else:
                hist_obj.data = np.concatenate(hist[name].data)

            # set common histogram attributes
            hist_obj.title = name

            for key in ('background1', 'background2', 'n_events', 'n_bytes'):
                setattr(hist_obj, key, int(np.sum(getattr(hist[name], key))))

            for key in ('id_number', 'n_bins', 'good_bin1', 'good_bin2',
                        't0_bin', 't0_ps', 's_per_bin', 'fs_per_bin', 'htype'):

                item = getattr(hist[name], key)
                if all(item[0] == item):
                    setattr(hist_obj, key, item[0])
                else:
                    setattr(hist_obj, key, np.nan)

            # save in dictionary
            hist_joined[name] = hist_obj

        self.hist = hist_joined
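The two combination branches above reduce to a choice between np.sum and np.concatenate over the per-run histogram arrays. A short numpy-only sketch of that choice, with made-up counts:

import numpy as np

# one histogram as recorded in two separate runs (made-up counts)
run1 = np.array([5, 7, 6, 4])
run2 = np.array([6, 8, 5, 3])

# no scan histogram present (SLR-style data): add the histograms bin by bin
summed = np.sum([run1, run2], axis=0)
print(summed)     # [11 15 11  7]

# a 'Frequency' or 'x parameter' histogram is present: append the scans instead
appended = np.concatenate([run1, run2])
print(appended)   # [5 7 6 4 6 8 5 3]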
Example #5
    def asym_mean(self, *asym_args, **asym_kwargs):
        """
            Get individual asymmetries first, then combine with weighted mean
            
            asym_args, asym_kwargs: passed to bdata.asym.
        """
    
        # calculate the asymmetry of each run separately
        asym_list = [b.asym(*asym_args, **asym_kwargs) for b in self.data]
        
        # make into dataframes, get errors as weights
        for i in range(len(asym_list)):
            asym = asym_list[i]
            
            # array return: (x, a, da)
            if type(asym) is np.ndarray:
                asym_list[i] = pd.DataFrame({'x': asym[0], 'a': asym[1], 'da': asym[2]})
            
            # dict return: {x_name: x, p: (a, da), ...}
            elif type(asym) is mdict:
                
                # if entry is tuple, split into error and value
                klist = list(asym.keys())
                for k in klist:
                    if type(asym[k]) is tuple:
                        asym['d'+k] = asym[k][1]
                        asym[k] = asym[k][0]
                    else:
                        # the lone non-tuple entry is the x axis: rename it and remember its key
                        asym['x'] = asym[k]
                        del asym[k]
                        xk = k
                
                # make into data frame
                asym_list[i] = pd.DataFrame(asym)
        
        # combine the data frames and set index
        df = pd.concat(asym_list).set_index('x')
        
        # slice into errors and values
        values = pd.DataFrame(df[[c for c in df.columns if 'd' not in c]])
        errors = pd.DataFrame(df[[c for c in df.columns if 'd' in c]])
        
        # rename error columns
        errors.rename(columns={c:c.replace('d','') for c in errors.columns},
                      inplace=True)
        
        # make errors weights
        errors = 1/errors.apply(np.square)
        
        # weight the values
        values = values * errors
        
        # group and sum 
        values = values.groupby(level=0).sum()
        errors = errors.groupby(level=0).sum()

        # weighted mean
        values = values / errors
        errors = 1/errors.apply(np.sqrt)
        
        # make output type the same as the original 
        if type(asym) is np.ndarray:
            return np.array([values.index.values,
                             values.values.T[0],
                             errors.values.T[0]])
        
        elif type(asym) is mdict:
            out = mdict()
            out[xk] = values.index
            for c in values.columns:
                out[c] = (values[c].values,errors[c].values)
        
            return out
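The core of asym_mean is an inverse-variance weighted mean computed per x value with a pandas groupby. A condensed, self-contained sketch of that calculation on made-up data for two runs:

import numpy as np
import pandas as pd

# asymmetries of two runs on a shared x axis (values and errors are made up)
run1 = pd.DataFrame({'x': [1.0, 2.0], 'a': [0.10, 0.12], 'da': [0.02, 0.03]})
run2 = pd.DataFrame({'x': [1.0, 2.0], 'a': [0.14, 0.10], 'da': [0.04, 0.02]})

df = pd.concat([run1, run2]).set_index('x')
df['w'] = 1 / df['da']**2       # inverse-variance weights
df['aw'] = df['a'] * df['w']    # weighted values

g = df.groupby(level=0).sum()   # sum weights and weighted values at each x
mean = g['aw'] / g['w']         # weighted mean
err = 1 / np.sqrt(g['w'])       # uncertainty of the weighted mean

print(mean.values)   # approx [0.108 0.106]
print(err.values)    # approx [0.018 0.017]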