Example #1
 def __init__(self,
              name,
              value=0,
              expr=None,
              pmin=None,
              pmax=None,
              overall=False,
              delta=0,
              error=0,
              correl=None,
              initexpr=None,
              finalize=lambda x: x):
     if not id_re.match(name):
         raise UFitError('Parameter name %r is not a valid Python '
                         'identifier' % name)
     if name in expr_namespace:
         raise UFitError('Parameter name %r is reserved' % name)
     self.name = name
     self.value = value
     self.expr = expr
     self.initexpr = initexpr
     self.pmin = pmin
     self.pmax = pmax
     # true if a global parameter for a global fit
     self.overall = overall
     # transform parameter after successful fit
     self.finalize = finalize
     # for backends that support setting parameter increments
     self.delta = delta
     # properties set on fit result
     self.error = error
     self.correl = correl or {}
Example #2
 def colindex(col):
     if isinstance(col, string_types):
         try:
             return colnames.index(col)
         except ValueError:
             raise UFitError('No such data column: %s' % col)
     elif 1 <= col <= len(colnames):
         return col - 1  # 1-based indices
     else:
         raise UFitError('Data has only %d columns (but column %s is '
                         'requested)' % (len(colnames), col))
Example #3
def _nicos_common_load(fp, colnames, colunits, meta, comments):
    def convert_value(s):
        try:
            return float(s)
        except ValueError:
            return 0.0
    cvdict = dict((i, convert_value) for i in range(len(colnames))
                  if colnames[i] != ';')
    colnames = [name for name in colnames if name != ';']
    colunits = [unit for unit in colunits if unit != ';']
    usecols = list(cvdict)
    coldata = loadtxt(fp, converters=cvdict, usecols=usecols, ndmin=2,
                      comments=comments)
    if not coldata.size:
        raise UFitError('empty data file')
    cols = dict((name, coldata[:, i]) for (i, name) in enumerate(colnames))
    meta['environment'] = []
    for col in cols:
        meta[col] = cols[col].mean()
    for tcol in ['Ts', 'sT', 'T_ccr5_A', 'T_ccr5_B', 'sensor1']:
        if tcol in cols:
            meta['environment'].append('T = %.3f K' % meta[tcol])
            break
    if 'B' in cols:
        meta['environment'].append('B = %.3f T' % meta['B'])
    qhindex = _hkle_index(colnames)
    if qhindex > -1:
        meta['hkle'] = coldata[:, qhindex:qhindex+4]
        deviations = array([cs.max() - cs.min()
                            for cs in coldata.T[qhindex:qhindex+4]])
        xg = colnames[qhindex + deviations.argmax()]
        meta['hkle_vary'] = xg
    return colnames, coldata, meta
Example #4
def set_backend(which):
    """Select a new backend for fitting."""
    global backend
    backend = globals()[which]
    if backend is None:
        raise UFitError('Backend %r is not available' % which)
    print('ufit using %s backend' % backend.backend_name)
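A minimal usage sketch for the function above; the import path and the backend name are assumptions and depend on which backend modules import successfully in a given installation.

# Hedged usage sketch: import path and backend name are assumptions;
# set_backend() raises UFitError if the named backend failed to import.
from ufit import backends

backends.set_backend('scipy')   # prints the chosen backend's name on success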
Example #5
 def from_init(cls, name, pdef):
     if isinstance(pdef, cls):
         return pdef
     self = cls(name)
     while not isinstance(pdef, (number_types, string_types)):
         if isinstance(pdef, overall):
             self.overall = True
             pdef = pdef.v
         elif isinstance(pdef, datapar):
             self.expr = 'data.' + pdef.v
             pdef = 0
         elif isinstance(pdef, datainit):
             self.initexpr = 'data.' + pdef.v
             pdef = 0
         elif isinstance(pdef, delta):
             self.delta = pdef.delta
             pdef = pdef.v
         elif isinstance(pdef, tuple) and len(pdef) == 3:
             self.pmin, self.pmax, pdef = pdef
         else:
             raise UFitError('Parameter definition %s not understood' %
                             pdef)
     if isinstance(pdef, string_types):
         self.expr = pdef
     else:
         self.value = pdef
     return self
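The branches above enumerate the accepted parameter definition forms. The hedged sketch below maps a few of them to their effect, assuming the class is ufit's Param and that overall and datapar are the small wrapper types referenced in the code; the import path and the wrappers' constructor signatures are assumptions.

# Hedged illustration of the definition forms dispatched by from_init().
# Param, overall and datapar are taken from the code above; their exact
# import path and constructor signatures are assumptions.
from ufit.param import Param, overall, datapar

p1 = Param.from_init('bkgd', 0.5)             # number: plain start value
p2 = Param.from_init('ampl', (0, 100, 5))     # tuple: (pmin, pmax, value)
p3 = Param.from_init('area', 'ampl * fwhm')   # string: dependent expression
p4 = Param.from_init('incoh', overall(1.0))   # shared parameter in a global fit
p5 = Param.from_init('stt0', datapar('stt'))  # value taken from data metadata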
Example #6
 def load(self, filename):
     self.clear()
     # unpickle everything
     with open(filename, 'rb') as fp:
         if six.PY3:
             info = pickle.load(fp, encoding='latin1')
         else:
             info = pickle.load(fp)
     # load with the respective method
     savever = info.get('version', 0)
     try:
         getattr(self, '_load_v%d' % savever)(info)
     except AttributeError:
         raise UFitError('save version %d not supported' % savever)
     self.filename = filename
     # reassign indices (also to regenerate descriptions)
     for group in self.groups:
         for i, item in enumerate(group.items):
             item.set_group(group, i + 1)
             item.after_load()
             self.all_items.add(item)
         group.update_htmldesc()
     # let GUI elements update from propsdata
     self.itemsUpdated.emit()
     self.propsUpdated.emit()
     self.filenameChanged.emit()
Example #7
    def load_numors(self,
                    nstring,
                    binsize,
                    xcol,
                    ycol,
                    dycol=None,
                    ncol=None,
                    nscale=1,
                    floatmerge=True,
                    filter=None):
        """Load a number of data files and merge them according to numor
        list operations:

        * ``,`` - put single files in individual data sets
        * ``-`` - put sequential files in individual data sets
        * ``+`` - merge single files
        * ``>`` - merge sequential files
        """
        if not isinstance(binsize, number_types):
            raise UFitError('binsize argument must be a number')

        def toint(a):
            try:
                return int(a)
            except ValueError:
                raise UFitError('Invalid file number: %r' % a)

        # operator "precedence": ',' has lowest, then '+',
        # then '-' and '>' (equal)
        parts1 = nstring.split(',')
        datasets = []
        for part1 in parts1:
            if '-' in part1:
                a, b = map(toint, part1.split('-'))
                datasets.extend(
                    self.load(n, xcol, ycol, dycol, ncol, nscale,
                              filter).merge(binsize, floatmerge=floatmerge)
                    for n in range(a, b + 1))
            else:
                parts2 = part1.split('+')
                inner = []
                for part2 in parts2:
                    if '>' in part2:
                        a, b = map(toint, part2.split('>'))
                        ds = [
                            self.load(n, xcol, ycol, dycol, ncol, nscale,
                                      filter) for n in range(a, b + 1)
                        ]
                        inner.append(ds[0].merge(binsize,
                                                 *ds[1:],
                                                 floatmerge=floatmerge))
                    else:
                        inner.append(
                            self.load(toint(part2), xcol, ycol, dycol, ncol,
                                      nscale, filter))
                datasets.append(inner[0].merge(binsize,
                                               *inner[1:],
                                               floatmerge=floatmerge))
        return DatasetList(datasets)
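A hedged usage sketch of the numor-string syntax documented in the docstring above; the loader object and the column names are placeholders, only the operator semantics come from the docstring.

# Hedged usage sketch: 'loader' stands for whatever Loader instance is in
# use; the column names 'om' and 'ctr1' are placeholders.
sets = loader.load_numors('100,101', 0.0, 'om', 'ctr1')   # two separate datasets
sets = loader.load_numors('100-105', 0.0, 'om', 'ctr1')   # six separate datasets
sets = loader.load_numors('100+101', 0.01, 'om', 'ctr1')  # both files merged into one
sets = loader.load_numors('100>105', 0.01, 'om', 'ctr1')  # files 100..105 merged into one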
Example #8
def read_data(filename, fp):
    fp = io.TextIOWrapper(fp, 'ascii', 'ignore')
    meta = {}
    dtline = fp.readline()
    if not dtline.startswith('### NICOS data file'):
        raise UFitError('%r does not appear to be a NICOS data file' %
                        filename)
    ctime = time.mktime(time.strptime(
        dtline[len('### NICOS data file, created at '):].strip(),
        '%Y-%m-%d %H:%M:%S'))
    meta['created'] = ctime
    remark = ''
    for line in iter(fp.readline, ''):
        if line.startswith('### Scan data'):
            break
        if line.startswith('# '):
            items = line.strip().split(None, 3)
            try:
                oval, unit = items[3].split(None, 1)
                val = float(oval)
            except (IndexError, ValueError):
                try:
                    oval = items[3]
                    val = float(oval)
                except ValueError:
                    val = items[3]
                except IndexError:
                    continue
                unit = None
            key = items[1]
            if key.endswith(('_offset', '_precision')):
                # we don't need these for fitting
                continue
            if key.endswith('_value'):
                key = key[:-6]
            if key.endswith('_instrument'):
                meta['instrument'] = oval.lower()
                continue
            elif key.endswith('_proposal'):
                meta['experiment'] = oval.lower()
            elif key.endswith('_samplename'):
                meta['title'] = oval
            elif key.endswith('_remark'):
                remark = oval
            elif key == 'number':
                meta['filenumber'] = int(oval)
                continue
            elif key == 'info':
                meta['subtitle'] = val
                continue
            meta[key] = val
    if remark and 'title' in meta:
        meta['title'] += ', ' + remark
    colnames = fp.readline()[1:].split()
    colunits = fp.readline()[1:].split()
    return _nicos_common_load(fp, colnames, colunits, meta, '#')
Example #9
 def load(self,
          n,
          xcol,
          ycol,
          dycol=None,
          ncol=None,
          nscale=1,
          filter=None):
     try:
         return self._inner_load(n, xcol, ycol, dycol, ncol, nscale, filter)
     except Exception as e:
         raise UFitError('Could not load data file %d: %s' % (n, e))
Example #10
 def _get_reader(self, filename, fobj):
     from ufit.data import data_formats, data_formats_image
     if self.format == 'auto':
         for n, m in iteritems(data_formats):
             # check 'simple' formats last
             if not n.startswith('simple') and m.check_data(fobj):
                 return m, n in data_formats_image
         for n, m in iteritems(data_formats):
             if n.startswith('simple') and m.check_data(fobj):
                 return m, n in data_formats_image
         raise UFitError('File %r has no recognized file format' % filename)
     return data_formats[self.format], self.format in data_formats_image
Example #11
 def save(self):
     # let GUI elements update the stored propsdata
     self.propsRequested.emit()
     if self.filename is None:
         raise UFitError('session has no filename yet')
     info = {
         'version':  SAVE_VERSION,
         'groups':   self.groups,
         'props':    self.props,
     }
     with open(self.filename, 'wb') as fp:
         pickle.dump(info, fp, protocol=pickle.HIGHEST_PROTOCOL)
     self.dirtyChanged.emit(False)
Example #12
def prepare_params(params, meta):
    # find parameters that need to vary
    dependent = {}
    varying = []
    varynames = []
    for p in params:
        if p.initexpr:
            try:
                p.value = param_eval(p.initexpr, {'data': meta})
            except Exception:
                pass  # can happen for heterogeneous data collections
        if p.expr:
            dependent[p.name] = [p.expr, None]
        else:
            varying.append(p)
            varynames.append(p.name)

    pd = dict((p.name, p.value) for p in varying)
    pd.update(expr_namespace)
    pd['data'] = meta

    # poor man's dependency tracking of parameter expressions
    dep_order = []
    maxit = len(dependent) + 1
    while dependent:
        maxit -= 1
        if maxit == 0:
            s = '\n'.join('   %s: %s' % (k, v[1])
                          for (k, v) in iteritems(dependent))
            raise UFitError('Detected unresolved parameter dependencies:\n' +
                            s)
        for p, (expr, _) in listitems(dependent):  # dictionary will change
            try:
                pd[p] = param_eval(expr, pd)
            except NameError as e:
                dependent[p][1] = str(e)
            except AttributeError as e:
                dependent[p][1] = 'depends on data.' + str(e)
            else:
                del dependent[p]
                dep_order.append((p, expr))
    # pd.pop('__builtins__', None)

    return varying, varynames, dep_order, pd
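The loop above resolves dependent parameters by repeated evaluation, giving up after len(dependent) + 1 passes. A self-contained toy version of that idea, using plain eval instead of ufit's param_eval:

# Self-contained toy version of the dependency resolution used above:
# keep re-evaluating expressions against the growing namespace until all
# are resolved, and bail out after len(dependent) + 1 passes.
dependent = {'area': 'ampl * fwhm', 'bkgd2': 'area / 10'}
namespace = {'ampl': 5.0, 'fwhm': 0.3}
maxit = len(dependent) + 1
while dependent:
    maxit -= 1
    if maxit == 0:
        raise RuntimeError('unresolved parameter dependencies: %s' % dependent)
    for name, expr in list(dependent.items()):
        try:
            namespace[name] = eval(expr, {}, namespace)
        except NameError:
            continue      # depends on a not-yet-resolved parameter; retry
        del dependent[name]
print(namespace['area'], namespace['bkgd2'])   # 1.5 0.15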
Example #13
def set_dataformat(format):
    """Set the input data format.

    Normally ufit autodetects file formats, but this can be overridden using
    this function.  Data formats are:

    * ``'ill'`` - ILL TAS data format
    * ``'llb'`` - LLB binary TAS data format (known to work for data from 1T
      and unpolarized 4F)
    * ``'nicos'`` - NICOS data format
    * ``'old nicos'`` - NICOS 1.0 data format
    * ``'trisp'`` - FRM-II TRISP data format
    * ``'taipan'`` - ANSTO Taipan data format
    * ``'nist'`` - NIST data format
    * ``'simple'`` - simple whitespace-separated multi-column files
    * ``'simple comma-separated'`` - simple comma-separated multi-column files
    """
    if format not in data_formats:
        raise UFitError('Unknown data format: %r, available formats are %s' %
                        (format, ', '.join(data_formats)))
    global_loader.format = format
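A minimal usage sketch; the import path is an assumption, and 'nicos' is one of the format names listed in the docstring above.

# Hedged usage sketch: force the NICOS reader instead of autodetection.
# The import path is an assumption.
from ufit.data import set_dataformat

set_dataformat('nicos')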
Example #14
 def toint(a):
     try:
         return int(a)
     except ValueError:
         raise UFitError('Invalid file number: %r' % a)
Example #15
def read_data(filename, fp):
    fp = io.TextIOWrapper(fp, 'ascii', 'ignore')
    meta = {}
    first_pos = fp.tell()
    dtline = fp.readline()
    fp.seek(first_pos)
    if not dtline.startswith('filename'):
        raise UFitError('%r does not appear to be an old NICOS data file' %
                        filename)
    for line in iter(fp.readline, ''):
        # finished, go for data
        if line.startswith('scan data'):
            break
        # skip these lines
        if line.startswith(('***', '[', 'Sample information',
                            'instrument general setup at file creation',
                            'offsets of main axes')):
            continue
        # POLARIZATION, not implemented:
        # if line.startswith('counting for switching devices'): # polarized measurements!
        #    self.pol_devices = [d.strip() for d in line.split(']')[0].split('[')[1].split(', ')]
        #    self.pol_states = []
        #    self.polarized = True
        #    for s in line.split('states ')[1][1:-1].split('], ['):
        #        s = [d.strip() for d in s.split(', ')]
        #        ts = {}
        #        for i in range(len(self.pol_devices)):
        #            ts[self.pol_devices[i]] = s[i]
        #        self.pol_states.append(ts)

        try:
            key, value = line.split(':', 1)
        except ValueError:
            print('ignored line: %r' % line)
            continue
        key = key.strip()
        value = value.strip()

        # some values are not important
        if key in blacklist:
            continue

        # some value names should be mapped
        if key in mapping:
            key = mapping[key]

        parts = value.split()
        if not parts:
            continue
        if key in ('ss1', 'ss2'):
            try:
                for i, side in enumerate(('left', 'right', 'bottom', 'top')):
                    meta['%s_%s' % (key, side)] = float(parts[i])
                meta[key] = tuple(float(b) for b in parts[:4])
            except Exception:
                continue
        elif value.endswith(('mm', 'deg', 'deg.', 'A-1', 'THz', 'meV', 'T',
                             'K', 'bar', '%', 's', 'min', 'min.', 'A')):
            try:
                meta[key] = float(parts[0])
            except ValueError:
                meta[key] = parts[0]
        else:
            meta[key] = value

    # convert some values
    if 'created' in meta:
        meta['created'] = time.mktime(
            time.strptime(meta['created'], '%m/%d/%Y %H:%M:%S'))
    if 'filename' in meta:
        meta['filename'] = meta['filename'].strip("'")
        meta['filenumber'] = int(meta['filename'].split("_")[1])

    # read data
    meta['subtitle'] = fp.readline().strip()
    colnames = fp.readline().split()
    colunits = fp.readline().split()

    return _nicos_common_load(fp, colnames, colunits, meta, '*')
Example #16
def rebin(data, binsize, meta={}):
    """Simple rebinning of (x, y, dy, n) data and col_ meta."""

    if binsize == 0:
        # no merging, just concatenate
        return data

    x, y, dy, n = data.T
    # copy meta
    new_meta = meta.copy()
    # identify columns
    metanames = []
    if meta:
        for col in meta:
            if not col.startswith('col_'):
                continue
            # add column to data
            metanames.append(col)

    # calculate new x values

    halfbinsize = binsize/2.
    stops = arange(x.min() - (x.min() % binsize) - binsize,
                   x.max() - (x.max() % binsize) + 2*binsize,
                   binsize) + halfbinsize
    nbins = len(stops)

    # newarray will be the new x, y, dy, n and meta columns array
    newarray = zeros((nbins, 4 + len(metanames)))
    newarray[:, 0] = stops

    # this will keep track which data values we already used
    data_unused = ones(len(x), bool)

    # this will keep track which new bins are used; unused ones are
    # left out
    new_used = ones(nbins, bool)

    for i in range(nbins):
        stop = newarray[i, 0]
        # get indices of all data points with x values lying below stop
        indices = x <= (stop + halfbinsize)
        # remove indices of data already used in previous bins
        indices &= data_unused
        if indices.any():
            newarray[i, 1] += y[indices].sum()
            newarray[i, 2] += sqrt((dy[indices]**2).sum())
            newarray[i, 3] += n[indices].sum()
            data_unused[indices] = False
            for j, m in enumerate(metanames):
                newarray[i, 4 + j] += meta[m][indices].sum() / sum(indices)
        else:
            new_used[i] = False
    # are there any data points left unused?
    if data_unused.any():
        raise UFitError('Merging data failed (data left over), check merging '
                        'algorithm for bugs')

    # remove any stops without monitor data
    newarray = newarray[new_used]

    # extract merged meta information
    for i, m in enumerate(metanames):
        new_meta[m] = array(newarray[:,4 + i])
    # return arrays
    return array(newarray[:,:4]), new_meta
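A hedged, self-contained usage sketch for rebin() with synthetic (x, y, dy, n) data; the import path for rebin is an assumption.

# Hedged usage sketch with synthetic data; the import path is an assumption.
from numpy import arange, column_stack, ones, sqrt
from ufit.data.merge import rebin   # assumed location of rebin()

x = arange(0.0, 1.0, 0.05)                          # 20 points, step 0.05
y = 100.0 * ones(len(x))
data = column_stack([x, y, sqrt(y), ones(len(x))])  # columns: x, y, dy, n
binned, newmeta = rebin(data, 0.1)                  # merge into 0.1-wide bins
print(binned.shape)                                 # roughly half as many rows, 4 columns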
Example #17
def read_data(filename, fp):
    fp = io.TextIOWrapper(fp, 'ascii', 'ignore')
    line = ''
    meta = {}
    while line.strip() != 'DATA_:':
        if line.startswith('IIIIIIIIIIIIIIIIIII'):
            # D23 format
            fp.seek(0, 0)
            return read_data_d23(filename, fp)
        if line.startswith('COMND:'):
            meta['subtitle'] = ' '.join(line[7:].rstrip().lower().split())
        elif line.startswith('TITLE:'):
            meta['title'] = line[6:].strip()
        elif line.startswith('FILE_:'):
            meta['filenumber'] = int(line[6:].strip())
        elif line.startswith('PARAM:'):
            parts = line[6:].strip().rstrip(',').split(',')
            for part in parts:
                k, s = part.split('=')
                meta[k.strip()] = float(s.strip())
        elif line.startswith('INSTR:'):
            meta['instrument'] = line[6:].strip().lower()
        elif line.startswith('EXPNO:'):
            meta['experiment'] = line[6:].strip().lower()
        line = fp.readline()
        if not line:
            break
    all_names = fp.readline().split()
    if not all_names:
        raise UFitError('No data columns found in file %r' % filename)
    usecols = []
    names = []
    for i, name in enumerate(all_names):
        # XXX have to do flipper handling right
        if name in ('PNT', 'F1', 'F2'):
            continue
        names.append(name)
        usecols.append(i)
    # Berlin implementation adds "Finished ..." in the last line,
    # pretend that it is a comment
    with catch_warnings(record=True) as warnings:
        arr = atleast_2d(
            genfromtxt(iter(lambda: fp.readline().encode(), b''),
                       usecols=usecols,
                       comments='F',
                       invalid_raise=False))
    for warning in warnings:
        print('!!! %s' % warning.message)
    for i, n in enumerate(names):
        meta[n] = arr[:, i].mean()
    meta['environment'] = []
    if 'TT' in meta:
        meta['environment'].append('T = %.3f K' % meta['TT'])
    if 'MAG' in meta:
        meta['environment'].append('B = %.5f T' % meta['MAG'])
    if names[3] == 'EN':
        meta['hkle'] = arr[:, :4]
        deviations = array([(cs.max() - cs.min()) for cs in arr.T[:4]])
        meta['hkle_vary'] = ['h', 'k', 'l', 'E'][deviations.argmax()]
    elif names[0] == 'QH':  # 2-axis mode
        meta['hkle'] = arr[:, :3]
        meta['hkle'] = array([(h, k, l, 0) for (h, k, l) in meta['hkle']])
        deviations = array([(cs.max() - cs.min()) for cs in arr.T[:4]])
        meta['hkle_vary'] = ['h', 'k', 'l', 'E'][deviations.argmax()]
    if len(arr) == 0:
        raise UFitError('No data found in file %r' % filename)
    return names, arr, meta
Example #18
def read_data_d23(filename, fp):
    line = fp.readline()
    meta = {'instrument': 'D23'}
    imeta = {}
    arr = []
    after_head = False
    scantype = None
    section = 0
    while True:
        if len(line) == 81 and len(set(line)) == 2:
            # heading
            after_head = True
            section += 1
        elif after_head:
            if section == 1:
                meta['filenumber'] = int(line.split()[0])
            elif section == 2:
                # experiment user
                fp.readline()
                line = fp.readline()
                meta['experiment'] = line.strip('\x00').split()[0]
            elif section == 3:
                # experiment title and "scan type" (can be empty)
                fp.readline()
                line = fp.readline()
                fields = line.split()
                meta['title'] = fields[0]
                if len(fields) > 1:
                    scantype = fields[1]
            elif section == 4:
                # integral metadata, only relevant for interpreting the rest
                nlines = int(line.split()[1])
                valnames = []
                values = []
                for _ in range(nlines):
                    valnames.extend(fp.readline().split())
                for _ in range(nlines):
                    values.extend(map(int, fp.readline().split()))
                imeta = dict(zip(valnames, values))
            elif section == 5:
                # experimental metadata, take all
                nlines = int(line.split()[1])
                valnames = []
                values = []
                for _ in range(nlines):
                    valnames.extend(x
                                    for x in re.split(' {2,}',
                                                      fp.readline().strip())
                                    if x)
                for _ in range(nlines):
                    values.extend(map(float, fp.readline().split()))
                for (k, v) in zip(valnames, values):
                    meta[k] = v
            elif section == 7:
                # data follows; must be #points * (#detvals + #angles)
                ncols = 3 + imeta['nbang']
                if int(line.split()[0]) != imeta['nkmes'] * ncols:
                    raise ValueError('invalid number of data items')
                line = fp.readline()
                data = []
                while line:
                    data.extend(map(float, line.split()))
                    line = fp.readline()
                arr = array(data).reshape((imeta['nkmes'], ncols))
                break
            after_head = False
        line = fp.readline()
        if not line:
            break

    # check if we have a supplementary .dat file to get the HKL values out
    # (the original file doesn't contain it, and we don't want to implement
    # the UB matrix to calculate it here)
    hkl = None
    if scantype != 'omega' and path.isfile(filename + '.dat'):
        hkl = []
        fp1 = io.open(filename + '.dat', 'rb')
        fp1 = io.TextIOWrapper(fp1, 'ascii', 'ignore')
        for line in fp1:
            hkl.append([float(x) for x in line.split()[:3]])
        hkl = array(hkl)
        meta['hkle'] = array([(h, k, l, 0) for (h, k, l) in hkl])
        deviations = array([(cs.max() - cs.min()) for cs in arr.T[:4]])
        meta['hkle_vary'] = ['h', 'k', 'l', 'E'][deviations.argmax()]

    # reshuffle columns to place the detector last
    arrindices = array(list(range(3, arr.shape[1])) + [1, 2, 0])
    arr = arr[:, arrindices]

    if hkl is not None:
        names = ['QH', 'QK', 'QL']
        arr = concatenate([hkl, arr], 1)
    else:
        names = []
    for i in range(1, 8):
        coltype = imeta['icdesc%d' % i]
        if coltype == 0:
            break
        names.append({
            1: 'GAMMA',
            2: 'OMEGA',
            5: 'CHI',
            -1: 'T',
            -6: 'B',
        }.get(coltype, 'COL_%d' % coltype))
    names.extend(['M1', 'M2', 'CNTS'])
    for i, n in enumerate(names):
        meta[n] = arr[:, i].mean()
    meta['environment'] = []
    if 'Temp-sample' in meta:
        meta['environment'].append('T = %.3f K' % meta['Temp-sample'])
    if 'Mag.field' in meta:
        meta['environment'].append('B = %.5f T' % meta['Mag.field'])

    if len(arr) == 0:
        raise UFitError('No data found in file %r' % filename)
    return names, arr, meta
Example #19
def read_data(filename, fp):
    fp = io.TextIOWrapper(fp, 'ascii', 'ignore')
    line = ''
    meta = {}
    infofp = io.open(filename[:-4] + '.log',
                     'r',
                     encoding='ascii',
                     errors='ignore')
    # first line in scan info
    line = infofp.readline()
    meta['subtitle'] = ' '.join(line.lower().split())
    meta['title'] = ''  # nothing here
    meta['instrument'] = 'trisp'
    while not line.startswith('Limits'):
        line = infofp.readline()
        if '-----' in line:
            continue
        parts = line.split()
        try:
            if len(parts) == 2:
                meta[parts[0]] = float(parts[1])
            elif len(parts) == 3:  # encoder target, value: so take value
                meta[parts[0]] = float(parts[2])
        except ValueError:
            pass
    names = fp.readline().split()
    pal = 'pal' in names  # file with polarization analysis?
    if not names:
        raise UFitError('No data columns found in file %r' % filename)
    usecols = list(range(len(names)))
    if names[0] == 'pnt':
        usecols = list(range(1, len(names)))
        names = names[1:]
    arr = loadtxt(fp, ndmin=2, usecols=usecols)
    for i, n in enumerate(names):
        meta[n] = arr[:, i].mean()
    meta['environment'] = []
    if 'TTA' in meta:
        meta['environment'].append('T = %.3f K' % meta['TTA'])
    if len(arr) == 0:
        raise UFitError('No data found in file %r' % filename)
    if pal:
        if 'QH' not in names:
            raise UFitError('Polarization data without QHKLE not supported')
        nfixed = names.index('E')  # fixed columns (same for all PA points)
        pal_values = set(arr[:, 0])
        npal = len(pal_values)  # number of PA points
        names_new = names[1:nfixed + 1]
        nvary = arr.shape[1] - nfixed - 1  # without pal and fixed columns
        arr_new = zeros((arr.shape[0] // npal, nfixed + nvary * npal))
        for pal_value in sorted(pal_values):
            for name in names[nfixed + 1:]:
                names_new.append(name + '_%d' % pal_value)
            arr_new[:, nfixed+(pal_value-1)*nvary:nfixed+pal_value*nvary] = \
                arr[pal_value-1::npal, nfixed+1:]
        # take fixed points from first PA point
        arr_new[:, :nfixed] = arr[::npal, 1:nfixed + 1]
        names = names_new
        arr = arr_new
    if names[0] == 'QH':
        meta['hkle'] = arr[:, :4]
        deviations = array([(cs.max() - cs.min()) for cs in arr.T[:4]])
        meta['hkle_vary'] = ['h', 'k', 'l', 'E'][deviations.argmax()]
    return names, arr, meta
Example #20
    def _inner_load_scan(self, rdr, filename, fobj, n, xcol, ycol, dycol, ncol,
                         nscale, filter):
        colnames, coldata, meta = rdr.read_data(filename, fobj)
        if filter is not None:
            for fcol, fval in filter.items():
                if fcol in colnames:
                    coldata = coldata[coldata.T[colnames.index(fcol)] == fval]
                else:
                    raise UFitError('Filtered column %s does not exist' % fcol)
        colguess = rdr.guess_cols(colnames, coldata, meta)
        if 'filenumber' not in meta:
            meta['filenumber'] = n
        meta['datafilename'] = filename
        for colname, colvalues in zip(colnames, coldata.T):
            meta['col_%s' % colname] = colvalues
        datarr = ones((len(coldata), 4))

        def colindex(col):
            if isinstance(col, string_types):
                try:
                    return colnames.index(col)
                except ValueError:
                    raise UFitError('No such data column: %s' % col)
            elif 1 <= col <= len(colnames):
                return col - 1  # 1-based indices
            else:
                raise UFitError('Data has only %d columns (but column %s is '
                                'requested)' % (len(colnames), col))

        use_hkl = False
        if xcol == 'hkl':
            xcol = 'auto'
            use_hkl = True
        if xcol == 'auto':
            xcol = colguess[0]
        datarr[:, 0] = coldata[:, colindex(xcol)]
        if ycol == 'auto':
            ycol = colguess[1]
        datarr[:, 1] = coldata[:, colindex(ycol)]
        if dycol == 'auto':
            dycol = colguess[2]
        if dycol is not None:
            datarr[:, 2] = coldata[:, colindex(dycol)]
        else:
            datarr[:, 2] = sqrt(datarr[:, 1])
        if ncol == 'auto':
            ncol = colguess[3]
        if ncol is not None:
            datarr[:, 3] = coldata[:, colindex(ncol)]
            if nscale == -1:
                nscale = int(float('%.2g' % datarr[:, 3].mean()))

        def colname(col):
            if col is None:
                return None
            elif isinstance(col, string_types):
                return col
            return colnames[col - 1]  # 1-based indices

        if use_hkl:
            meta['is_hkldata'] = True
        dset = ScanData(meta, datarr, colname(xcol), colname(ycol),
                        colname(ncol), nscale)
        if use_hkl and 'hkle' in dset.meta:  # 3-axis support
            dset.x = dset.meta['hkle']
        self.sets[n] = dset
        return dset