Example #1
def str_data(group, field, default=''):
    """
    Retrieve value of field as a string, with default if field is missing.
    """
    if field in group:
        data = group[field][0]
        if data.ndim > 0:
            # Multi-valued field: convert each element to a string.
            value = [_s(v) for v in data]
        else:
            value = _s(data)
        return value
    else:
        return default
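For reference, a minimal usage sketch (the file name, group, and field below are hypothetical; _s is assumed to be the module's bytes-to-str helper):

import h5py

with h5py.File("example.nxs", "r") as f:           # hypothetical file
    entry = f["entry"]                             # hypothetical group
    title = str_data(entry, "title", default="untitled")
    print(title)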
Example #2
def nxfind(group, nxclass):
    """
    Iterate over the entries of type *nxclass* in the hdf5 *group*.
    """
    for entry in group.values():
        if nxclass == _s(entry.attrs.get('NX_class', None)):
            yield entry
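A hedged usage sketch; the entry group is hypothetical, and 'NXdata' is a standard NeXus class name:

# List every NXdata group directly under an entry.
for data_group in nxfind(entry, "NXdata"):
    print(data_group.name)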
Example #3
def list_data(group, field):
    """
    Retrieve value of field as a list of strings, or [] if the field is missing.
    """
    if field in group:
        return [_s(s) for s in group[field]]
    else:
        return []
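A usage sketch with a hypothetical group; as Example #8 shows, some writers pack the whole list into one newline-separated string, so the caller may need to split:

names = list_data(das, "trajectory/scannedVariables")
if len(names) == 1:
    names = names[0].split("\n")  # unpack a single newline-separated string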
Example #4
def readUSANSNexus(input_file, file_obj=None, metadata_lookup=metadata_lookup, det_deadtime=0.0, trans_deadtime=0.0):
    """
    Load all entries from the NeXus file into sans data sets.
    """
    datasets = []
    file = h5_open_zip(input_file, file_obj)
    for entryname, entry in file.items():
        metadata = OrderedDict([
            ("run.filename", _s(entry["DAS_logs/trajectoryData/fileName"][0])),
            ("analysis.intent", _s(entry["DAS_logs/trajectoryData/intent"][0])),
            ("sample.name", _s(entry["DAS_logs/sample/name"][0])),
            ("run.filePrefix", _s(entry["DAS_logs/trajectoryData/filePrefix"][0])),
            ("run.instFileNum", int(entry["DAS_logs/trajectoryData/instFileNum"][0])),
            ("start_time", _s(entry["start_time"][0])),
            ("end_time",_s(entry["end_time"][0])),
            ("entry", _s(entryname)),
            ("dQv", 0.117), # constant of the instrument.  Should it be in the nexus def?
        ])

        counts = entry['DAS_logs/linearDetector/counts'][()]
        countTime = entry['DAS_logs/counter/liveTime'][()]
        trans_counts = entry['DAS_logs/transDetector/counts'][()]
        # Apply a non-paralyzable dead-time correction per tube, then sum
        # the corrected counts over the detector axis.
        detCts = (counts / (1.0 - (counts*det_deadtime/countTime[:, None]))).sum(axis=1)
        transCts = (trans_counts / (1.0 - (trans_counts*trans_deadtime/countTime[:, None]))).sum(axis=1)
        monCts = entry['DAS_logs/counter/liveMonitor'][()]
        Q = entry['DAS_logs/analyzerRotation/softPosition'][()]

        dataset = USansData(metadata=metadata, countTime=countTime, detCts=detCts, transCts=transCts, monCts=monCts, Q=Q)
        datasets.append(dataset)
    return datasets
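The dead-time factor above is the standard non-paralyzable correction: for observed counts m in live time t with dead time tau, the true counts are estimated as m / (1 - m*tau/t). A quick numeric check with hypothetical values:

import numpy as np

counts = np.array([[1000.0, 2000.0]])  # one scan point, two detector tubes (hypothetical)
countTime = np.array([10.0])           # live time in seconds (hypothetical)
tau = 1e-6                             # 1 microsecond dead time (hypothetical)
corrected = (counts / (1.0 - counts * tau / countTime[:, None])).sum(axis=1)
print(corrected)                       # slightly above the raw sum of 3000 counts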
Example #5
    def to_column_text(self):
        from io import BytesIO
        # Export to 6-column text format compatible with SASVIEW.
        # Columns written below: Q - I(Q) - std. dev. I(Q) - dQ - filler - filler
        labels = [
            "Qx (1/A)", "I(Q) (Counts/sec/(1e6 Monitor))",
            "std. dev. I(Q) (1/cm)", "dQ (1/A)", "filler", "filler"
        ]

        with BytesIO() as fid:
            cleaned_metadata = _toDictItem(self.metadata, convert_bytes=True)
            if not isinstance(cleaned_metadata, list):
                cleaned_metadata = [cleaned_metadata]
            for c in cleaned_metadata:
                fid.write(b'### Parameters:\n')
                for k, v in c.items():
                    fid.write(_b("# %s\n" % json.dumps({k: v}).strip("{}")))
            fid.write(
                _b("# %s\n" % json.dumps({
                    "columns": labels
                }).strip("{}")))
            filler = np.ones_like(
                self.Q, dtype='float') * -1.0 * cleaned_metadata[0]["dQv"]
            np.savetxt(fid,
                       np.vstack([
                           self.Q, self.iqCOR.x,
                           np.sqrt(self.iqCOR.variance), filler, filler, filler
                       ]).T,
                       fmt="%15.6g")
            fid.seek(0)
            name = _s(cleaned_metadata[0]["Sample file"])
            value = fid.read()

        return {
            "name": name,
            "entry": "",
            "file_suffix": ".usans.cor",
            "value": value.decode(),
        }
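The json.dumps({k: v}).strip("{}") idiom above renders each metadata item as a commented "key": value pair in the header. A quick illustration with a hypothetical value:

import json

line = "# %s\n" % json.dumps({"Sample file": "sample1"}).strip("{}")
print(line)  # -> # "Sample file": "sample1"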
Example #6
def load_nexus_entries(filename,
                       file_obj=None,
                       entries=None,
                       meta_only=False,
                       entry_loader=None):
    """
    Load the summary info for all entries in a NeXus file.
    """
    handle = h5_open.h5_open_zip(filename, file_obj)
    measurements = []
    for name, entry in handle.items():
        if entries is not None and name not in entries:
            continue
        if _s(entry.attrs.get('NX_class', None)) == 'NXentry':
            data = entry_loader(entry, name, filename)
            if not meta_only:
                data.load(entry)
            measurements.append(data)
    if file_obj is None:
        handle.close()
    return measurements
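A hedged sketch of a caller; the loader class and file name are hypothetical, and entry_loader is assumed to be a callable taking (entry, name, filename) and returning an object with a load(entry) method, as the code above implies:

class SummaryLoader:
    """Hypothetical loader satisfying the entry_loader contract."""
    def __init__(self, entry, name, filename):
        self.name, self.filename = name, filename
    def load(self, entry):
        self.fields = list(entry.keys())

runs = load_nexus_entries("example.nxs", entry_loader=SummaryLoader)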
Example #7
def data_as(group, fieldname, units, rep=None, NA=None, dtype=None):
    """
    Return value of field in the desired units.
    """
    if fieldname not in group:
        return NA
    field = group[fieldname]
    units_in = _s(field.attrs.get('units', ''))
    converter = unit.Converter(units_in)
    value = converter(field[()], units)
    if dtype is not None:
        value = np.asarray(value, dtype=dtype)
    if rep is not None:
        # Broadcast a scalar (or length-1) value to match the scan length.
        if np.isscalar(value) or len(value) == 1:
            return np.repeat(value, rep, axis=0)
        elif len(value) == rep:
            return value
        else:
            raise ValueError("field %r does not match counts in %r" %
                             (field.name, field.file.filename))
    else:
        return value
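A usage sketch mirroring the calls in Example #8 (the das group, monitor_device group, and scan length n are assumed to be in scope):

temps = data_as(das, 'temp/primaryNode/value', 'K', rep=n, dtype='d')
deadtime = data_as(monitor_device, 'dead_time', 'us')  # returns None if the field is absent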
Example #8
def nexus_common(self, entry, entryname, filename):
    #print(entry['instrument'].values())
    das = entry['DAS_logs']
    self.entry = entryname
    self.path = os.path.abspath(filename)
    self.name = str_data(das, 'trajectoryData/fileName', 'unknown')
    if 'trajectoryData/fileNum' in das:
        self.filenumber = das['trajectoryData/fileNum'][0]
    else:
        # fall back to randomly generated filenum
        from random import randint
        self.filenumber = -randint(10**9, (10**10) - 1)

    #self.date = iso8601.parse_date(entry['start_time'][0].decode('utf-8'))
    self.date = iso8601.parse_date(str_data(entry, 'start_time'))
    self.description = str_data(entry, 'experiment_description')
    self.instrument = str_data(entry, 'instrument/name')

    # Determine the number of points in the scan.
    # TODO: Reliable way to determine scan length.
    if 'trajectory/liveScanLength' in entry:
        # New files should have num points in trajectory/liveScanLength ...
        n = entry['trajectory/liveScanLength'][()]
    else:
        # Guess length by looking at the counter fields
        # Prefer to not load the entire counter at this point, especially since
        # we don't know where it is.
        n = das['counter/liveROI'].shape[0]
        if n == 1:
            n = das['counter/liveMonitor'].shape[0]
        if n == 1:
            n = das['counter/liveTime'].shape[0]
    self.points = n

    monitor_device = entry.get('control/monitor', {})
    self.monitor.deadtime = data_as(monitor_device, 'dead_time', 'us')
    self.monitor.deadtime_error = data_as(monitor_device, 'dead_time_error',
                                          'us')
    base = str_data(das, 'counter/countAgainst').lower()
    # NICE stores TIME, MONITOR, ROI, TIME_MONITOR, TIME_ROI, etc.
    if "monitor" in base:
        base = "monitor"
    elif "time" in base:
        base = "time"
    elif "roi" in base:
        base = "roi"
    else:
        base = "none"

    self.monitor.time_step = 0.001  # assume 1 ms accuracy on reported clock
    self.monitor.counts = data_as(das,
                                  'counter/liveMonitor',
                                  '',
                                  rep=n,
                                  dtype='d')
    # Counting statistics: the variance of the counts equals the counts.
    self.monitor.counts_variance = self.monitor.counts.copy()
    self.monitor.count_time = data_as(das, 'counter/liveTime', 's', rep=n)
    self.monitor.roi_counts = data_as(das,
                                      'counter/liveROI',
                                      '',
                                      rep=n,
                                      dtype='d')
    self.monitor.roi_variance = self.monitor.roi_counts.copy()
    self.monitor.source_power = data_as(
        das,
        'reactorPower/reactorPowerThermal/average_value',
        'MW',
        rep=n,
        dtype='d')
    self.monitor.source_power_variance = data_as(
        das,
        'reactorPower/reactorPowerThermal/average_value_error',
        'MW',
        rep=n,
        dtype='d')
    self.monitor.source_power_units = "MW"

    # NG7 monitor saturation is stored in control/countrate_correction
    saturation_device = entry.get('control/countrate_correction', None)
    if saturation_device is not None:
        rate = data_as(saturation_device, 'measured_rate', '')
        correction = data_as(saturation_device, 'correction', '')
        self.monitor.saturation = np.vstack((rate, 1. / correction))

    # CRUFT: old candor files don't define NXsample
    self.sample.name = str_data(entry, 'sample/name', default=None)
    self.sample.description = str_data(entry, 'sample/description')
    if self.sample.name is None:
        self.sample.name = str_data(entry, 'DAS_logs/sample/name')
        self.sample.description = str_data(entry,
                                           'DAS_logs/sample/description')

    # TODO: stop trying to guess DOI
    if 'DOI' in entry:
        URI = _s(entry['DOI'])
    else:
        # See: dataflow.modules.doi_resolve for helpers.
        #NCNR_DOI = "10.18434/T4201B"
        NCNR_DOI = "https://ncnr.nist.gov/pub/ncnrdata"
        LOCATION = {
            'pbr': 'ngd',
            'magik': 'cgd',
            'ng7r': 'ng7',
            'candor': 'cdr'
        }
        nice_instrument = str_data(das, 'experiment/instrument').lower()
        instrument = LOCATION.get(nice_instrument, nice_instrument)
        year, month = self.date.year, self.date.month
        cycle = "%4d%02d" % (year, month)
        experiment = str_data(entry, 'experiment_identifier')
        filename = os.path.basename(self.path)
        URI = "/".join(
            (NCNR_DOI, instrument, cycle, experiment, "data", filename))
    self.uri = URI

    self.scan_value = []
    self.scan_units = []
    self.scan_label = []
    if 'trajectory/scannedVariables' in das:
        scanned_variables = list_data(das, 'trajectory/scannedVariables')
        # Just in case the scanned variables is a string with
        # elements separated by new lines...
        if len(scanned_variables) == 1:
            scanned_variables = scanned_variables[0].split('\n')
        # TODO: exclude count fields from scanned variables
        #scanned_variables = [s for s in scanned_variables
        #                     if not s.startswith("areaDetector")]
        for node_id in scanned_variables:
            path = node_id.replace('.', '/')
            try:
                field = das[path]
            except KeyError:
                # Note: Suppressing this message because it makes the
                # regression tests noisy.  Older versions of the SelectFields
                # filter on the datawriter were not stripping the fields from
                # control/scanned variables lists, but newer ones are.
                # TODO: reenable test for missing scan fields
                #print(">>> could not read scanned %s for %s"
                #      % (node_id, os.path.basename(self.path)))
                continue
            try:
                scan_value = data_as(das, path, '', rep=n)
                scan_units = _s(field.attrs.get('units', ''))
                scan_label = _s(field.attrs.get('label', node_id))
            except Exception as exc:
                print(">>> unexpected error %s reading %s for %s" %
                      (str(exc), node_id, os.path.basename(self.path)))
                continue
            # check if numeric:
            if scan_value.dtype.kind in ["f", "u", "i"]:
                self.scan_value.append(scan_value)
                self.scan_units.append(scan_units)
                self.scan_label.append(scan_label)

    # TODO: magnetic field
    if 'temp' in das:
        if 'temp/primaryControlLoop' in das:
            temp_controller = das['temp/primaryControlLoop'][()]
            setpoint_field = 'temp/setpoint_%d' % (temp_controller, )
            self.sample.temp_setpoint = data_as(das, setpoint_field, 'K')[0]
        temp_values = data_as(das, 'temp/primaryNode/value', 'K')
        temp_shape = temp_values.shape[0] if temp_values.shape[0] else 1.0
        # Report the average temperature rounded to one decimal place.
        self.sample.temp_avg = round(np.sum(temp_values) / temp_shape, 1)
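For the fallback URI branch above, a quick illustration of the string that gets built (the instrument, cycle, experiment, and file name below are hypothetical):

NCNR_DOI = "https://ncnr.nist.gov/pub/ncnrdata"
parts = (NCNR_DOI, "cdr", "202401", "exp123", "data", "example.nxs")
print("/".join(parts))
# https://ncnr.nist.gov/pub/ncnrdata/cdr/202401/exp123/data/example.nxs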
Example #9
    def to_NXcanSAS(self):
        import h5py
        from io import BytesIO

        fid = BytesIO()
        # Build the HDF5 tree in an in-memory buffer; "w" creates a new file.
        h5_item = h5py.File(fid, "w")

        metadata = self.metadata[0]
        #entry_name = metadata.get("entry", "entry")
        nxentry = h5_item.create_group("sasentry1")
        nxentry.attrs.update({
            "NX_class": "NXentry",
            "canSAS_class": "SASentry",
            "version": "1.0"
        })
        nxentry["definition"] = "NXcanSAS"
        nxentry["run"] = _s(metadata.get("Sample file", "default_name"))
        nxentry["title"] = ""

        datagroup = nxentry.create_group("sasdata")
        datagroup.attrs.update({
            "NX_class": "NXdata",
            "canSAS_class": "SASdata",
            "signal": "I",
            "I_axes": "Q",
            "Q_indices": 0,
            "timestamp": _s(metadata.get("Start time", "unknown")),
        })
        datagroup["I"] = self.iqCOR.x
        datagroup["I"].attrs["units"] = "1/cm"
        datagroup["I"].attrs["uncertainties"] = "Idev"
        datagroup["Idev"] = np.sqrt(self.iqCOR.variance)
        datagroup["Idev"].attrs["units"] = "1/cm"
        datagroup["Q"] = self.Q
        datagroup["Q"].attrs["units"] = "1/angstrom"
        datagroup["Q"].attrs["resolutions"] = "dQl,dQw"
        datagroup["dQl"] = np.ones_like(
            self.Q, dtype='float') * -1.0 * metadata.get("dQv", 0.0)
        datagroup["dQl"].attrs["units"] = "1/angstrom"
        datagroup["dQw"] = np.zeros_like(self.Q, dtype='float')
        datagroup["dQw"].attrs["units"] = "1/angstrom"

        instrument = nxentry.create_group("sasinstrument")
        instrument.attrs.update({
            "canSAS_class": "SASinstrument",
            "NX_class": "NXinstrument"
        })
        sasaperture = instrument.create_group("sasaperture")
        sasaperture.attrs.update({
            "canSAS_class": "SASaperture",
            "NX_class": "NXaperture"
        })
        sasaperture["shape"] = "slit"
        sasaperture["x_gap"] = np.array([0.1], dtype='float')
        sasaperture["x_gap"].attrs["units"] = "cm"
        sasaperture["y_gap"] = np.array([5.0], dtype='float')
        sasaperture["y_gap"].attrs["units"] = "cm"
        sasdetector = instrument.create_group("sasdetector")
        sasdetector.attrs.update({
            "canSAS_class": "SASdetector",
            "NX_class": "NXdetector"
        })
        sasdetector["name"] = "BT5 DETECTOR ARRAY"
        sassource = instrument.create_group("sassource")
        sassource.attrs.update({
            "canSAS_class": "SASsource",
            "NX_class": "NXsource"
        })
        sassource["incident_wavelength"] = np.array([2.38], dtype='float')
        sassource["incident_wavelength"].attrs["units"] = "A"
        sassource["incident_wavelength_spread"] = np.array([0.06],
                                                           dtype='float')
        sassource["incident_wavelength_spread"].attrs["units"] = "A"
        sassource["radiation"] = "Reactor Neutron Source"

        sasprocess = nxentry.create_group("sasprocess")
        sasprocess.attrs.update({
            "canSAS_class": "SASprocess",
            "NX_class": "NXprocess"
        })
        sasprocess["name"] = "NIST reductus"

        sassample = nxentry.create_group("sassample")
        sassample.attrs.update({
            "canSAS_class": "SASsample",
            "NX_class": "NXsample"
        })
        sassample["name"] = ""
        sassample["thickness"] = np.array([metadata.get("Thickness")],
                                          dtype='float')
        sassample["thickness"].attrs["units"] = "cm"
        sassample["transmission"] = np.array([metadata.get("Trock/Twide")],
                                             dtype='float')
        sassample["transmission"].attrs["units"] = "unitless"

        return {
            "name": _s(metadata.get("Sample file", "default_name")),
            "entry": _s(metadata.get("entry", "default_entry")),
            "file_suffix": ".usans.cor.h5",
            "value": h5_item,
        }
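The returned "value" is the live h5py.File handle backed by the in-memory buffer. A minimal sketch of the same pattern, assuming a caller wants the raw HDF5 bytes (h5py supports writing to file-like objects):

from io import BytesIO
import h5py

buf = BytesIO()
with h5py.File(buf, "w") as f:  # same in-memory pattern as above
    f["x"] = [1, 2, 3]
data = buf.getvalue()           # raw HDF5 bytes once the file is closed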