예제 #1
0
def test_zero_mode_after():
    """Test zero with mode='after'."""
    raw = np.arange(10) * units('mm')

    expected = np.array([-5, -4, -3, -2, -1, 0, 0, 0, 0, 0]) * units('mm')

    assert_array_almost_equal(zero(raw, 5, mode='after'), expected)
예제 #2
0
def test_remove_offset():
    """Test remove_offset over the interval [4, 6]."""
    measured = np.array([0, 1, 2, 4, 4, 10, 10, 11, 12, 13, 14]) * units('mm')

    expected = np.array([0, 1, 2, 4, 4, 10, 4, 5, 6, 7, 8]) * units('mm')

    assert_array_almost_equal(remove_offset(measured, 4, 6), expected)
예제 #3
0
def test_zero_mode_before():
    """Test zero with mode='before'."""
    raw = np.arange(10) * units('mm')

    expected = np.array([0, 0, 0, 0, 0, 0, 1, 2, 3, 4]) * units('mm')

    assert_array_almost_equal(zero(raw, 5, mode='before'), expected)
예제 #4
0
def test_zero_defaults():
    """Test zero using only the default keyword arguments."""
    raw = np.arange(10) * units('mm')

    expected = np.array([-5, -4, -3, -2, -1, 0, 1, 2, 3, 4]) * units('mm')

    assert_array_almost_equal(zero(raw, 5), expected)
예제 #5
0
def test_zero_value_at_mode():
    """Test zeroing to a nonzero offset value in the default 'at' mode."""
    raw = np.arange(10) * units('mm')

    shifted = zero(raw, 5, value=1.5 * units('mm'))

    expected = np.array([-3.5, -2.5, -1.5, -0.5, 0.5,
                         1.5, 2.5, 3.5, 4.5, 5.5]) * units('mm')

    assert_array_almost_equal(shifted, expected)
예제 #6
0
def test_zero_value_before_mode():
    """Test zeroing to a nonzero offset value in the 'before' mode."""
    raw = np.arange(10) * units('mm')

    shifted = zero(raw, 5, value=1.5 * units('mm'), mode='before')

    expected = np.array([1.5, 1.5, 1.5, 1.5, 1.5,
                         1.5, 2.5, 3.5, 4.5, 5.5]) * units('mm')

    assert_array_almost_equal(shifted, expected)
예제 #7
0
def test_zero_value_after_mode():
    """Test zeroing to a nonzero offset value in the 'after' mode."""
    raw = np.arange(10) * units('mm')

    shifted = zero(raw, 5, value=1.5 * units('mm'), mode='after')

    expected = np.array([-3.5, -2.5, -1.5, -0.5, 0.5,
                         1.5, 1.5, 1.5, 1.5, 1.5]) * units('mm')

    assert_array_almost_equal(shifted, expected)
예제 #8
0
def test_zero_window():
    """Test zero with a window so the zero value is an average around the index."""
    raw = np.array([0, 1, 2, 2.2, 2.5, 2.3,
                    2.2, 2.6, 2.7, 2.9, 3]) * units('mm')

    # 2.36 mm is the mean of the five samples in the +/-2 window around index 5
    expected = raw - 2.36 * units('mm')

    assert_array_almost_equal(zero(raw, 5, window=2), expected)
예제 #9
0
def test_friction():
    """Test that friction produces the expected values when both inputs share units."""
    normal_stress = np.array([-.1, 0, 1, 2, 3, 4, 5]) * units('kN')
    shear_stress = np.array([0, 0, 2.2, 2.2, 2.2, 2.2, 2.2]) * units('kN')

    expected = np.array([0, 0, 2.2, 1.1, 0.7333333,
                         0.55, 0.44]) * units('dimensionless')

    assert_array_almost_equal(friction(shear_stress, normal_stress), expected)
예제 #10
0
def test_friction_different_units():
    """Test that friction produces the expected values when input units differ."""
    normal_stress = np.array([-100, 0, 1000, 2000, 3000, 4000, 5000]) * units('N')
    shear_stress = np.array([0, 0, 2.2, 2.2, 2.2, 2.2, 2.2]) * units('kN')

    expected = np.array([0, 0, 2.2, 1.1, 0.7333333,
                         0.55, 0.44]) * units('dimensionless')

    assert_array_almost_equal(friction(shear_stress, normal_stress), expected)
예제 #11
0
def test_elastic_correction_linear_same_units():
    """Test the elastic correction with a linear fit and consistent units."""
    fit_coeffs = [5 * units('mm/kN'), 10 * units('mm')]
    applied_loads = np.arange(10, 101, 10) * units('kN')
    raw_displacements = (np.arange(1, 11) * 1000) * units('mm')

    expected = np.array([940, 1890, 2840, 3790, 4740,
                         5690, 6640, 7590, 8540, 9490]) * units('mm')

    result = elastic_correction(applied_loads, raw_displacements, fit_coeffs)
    assert_array_almost_equal(result, expected)
예제 #12
0
    def get_data_dict(self, data_units=None, ignore_unknown_units=False):
        """
        Build a dictionary of unit-tagged arrays from the parsed columns.

        Units supplied by the caller take precedence; otherwise the units
        recorded in the Xlook file are parsed. Unparseable units either raise
        (default) or, when ``ignore_unknown_units`` is True, are replaced by
        dimensionless with a warning.

        Parameters
        ----------
        data_units : list
            List of quantities for each data column. Overrides any units from
            the file metadata.
        ignore_unknown_units : boolean
            If True any units from the file metadata that we cannot parse are
            set to dimensionless and a warning issued. If False (default) an
            error is raised.

        Returns
        -------
        data : dict
            Dictionary of quantity arrays
        """
        # Fall back to whatever units the user originally gave Xlook
        units_to_apply = self.data_units if data_units is None else data_units

        result = {}
        for idx, (col_name, col_unit) in enumerate(zip(self.data_names,
                                                       units_to_apply)):

            # Skip completely empty column slots
            if col_name is None and col_unit is None:
                continue

            # XLook users commonly used a . for dimensionless
            if col_unit == '.':
                col_unit = 'dimensionless'

            try:
                result[col_name] = self.data[idx] * units(col_unit)
            except UndefinedUnitError:
                if not ignore_unknown_units:
                    raise UndefinedUnitError(col_unit)
                result[col_name] = self.data[idx] * units('dimensionless')
                warnings.warn(f'Unknown unit {col_unit} for data {col_name}'
                              ' was assigned dimensionless')
        return result
예제 #13
0
    def command_zero(self, command):
        """
        Zero a column at a record.

        Parameters
        ----------
        command : str
            command from r file

        Notes
        -----
        The Xlook command is `zero column_number record_index`.

        See Also
        --------
        pylook.calc.zero
        """
        if not self._check_number_of_arguments(command, 3):
            return
        _, column_arg, record_arg = command.split()
        column = int(column_arg)
        record = int(record_arg)
        # Attach throwaway dimensionless units so the pylook calc function
        # accepts the data
        zeroed = lc.zero(
            self._get_data_by_index(column) * units('dimensionless'), record)
        # Store only the magnitude - we are not touching units and names
        self._set_data_by_index(column, zeroed.m)
예제 #14
0
    def command_ec(self, command):
        """
        Perform a linear elastic correction of a column.

        Parameters
        ----------
        command : str
            command from r file

        Notes
        -----
        This is not unit safe like the pure Python version would be as we're
        shedding units all around in the interpreter. The XLook command was
        `ec displacement_column_number load_column_number new_column_number
        first_row_index last_row_index`
        """
        if not self._check_number_of_arguments(command, 9):
            return

        args = command.split()
        disp_col_idx = int(args[1])
        load_col_idx = int(args[2])
        output_col_idx = int(args[3])
        first_idx = int(args[4])
        last_idx = int(args[5])
        # The correction uses the reciprocal of the slope given in the command
        slope = 1 / float(args[6])
        output_name = args[7]
        output_unit = args[8]

        # Get the data and assign units - they don't matter we just need them
        # for the pylook functions to work
        load_data = self._get_data_by_index(load_col_idx) * units('dimensionless')
        disp_data = self._get_data_by_index(disp_col_idx) * units('dimensionless')
        coeffs = [slope * units('dimensionless'), 0 * units('dimensionless')]

        corrected = disp_data - lc.elastic_correction(load_data, disp_data, coeffs)

        # Drop our unit charade and splice the corrected interval in
        corrected = corrected.m
        disp_data = disp_data.m
        disp_data[first_idx:last_idx] = corrected[first_idx:last_idx]

        self._set_data_by_index(output_col_idx, disp_data)
        self._set_name_by_index(output_col_idx, output_name)
        self._set_units_by_index(output_col_idx, output_unit)
예제 #15
0
def test_elastic_correction_quadratic_same_units():
    """Test the elastic correction with a quadratic fit and consistent units."""
    fit_coeffs = [2 * units('mm/kN**2'), 5 * units('mm/kN'), 10 * units('mm')]
    applied_loads = np.arange(10, 101, 10) * units('kN')
    raw_displacements = (np.arange(1, 11) * 1000) * units('mm')

    expected = np.array([740, 1090, 1040, 590, -260,
                         -1510, -3160, -5210, -7660, -10510]) * units('mm')

    result = elastic_correction(applied_loads, raw_displacements, fit_coeffs)
    assert_array_almost_equal(result, expected)
예제 #16
0
    def command_offset_int(self, command):
        """
        Perform an offset over an interval in the data.

        Parameters
        ----------
        command : str
            command from r file

        Notes
        -----
        The Xlook command is `offset_int column_number record_start_index record_end_index
        (y or n) to offset in between during the offset.`
        """
        if not self._check_number_of_arguments(command, 5):
            return

        (_, col_idx, start_idx, stop_idx, set_between) = command.split()
        col_idx = int(col_idx)
        start_idx = int(start_idx)
        stop_idx = int(stop_idx)
        if set_between.strip().lower() == 'y':
            set_between = True
        elif set_between.strip().lower() == 'n':
            set_between = False
        else:
            # Bug fix: bail out on an invalid y/n flag. Previously execution
            # fell through with set_between still a string, which would have
            # been treated as truthy by remove_offset.
            self.command_invalid()
            return

        col_data = self._get_data_by_index(col_idx)

        # Attach throwaway dimensionless units so the pylook calc function works
        col_data = lc.remove_offset(col_data * units('dimensionless'),
                                    start_idx,
                                    stop_idx,
                                    set_between=set_between)

        self._set_data_by_index(col_idx, col_data.m)
예제 #17
0
##############################

data_path = get_test_data('p655intact100l')

##############################

data, _ = read_binary(data_path)

##############################
# The time column in these data files actually stores the sample rate in Hz, so we invert
# it to get a delta time and cumulatively sum for experiment elapsed time. Notice that
# units are assigned simply by multiplying them - easy! `cumsum` is run on the bare
# magnitude of time because numpy silently drops our units; a `cumsum` wrapper in pylook
# will fix that soon.

data['Time'] = np.cumsum(1 / data['Time'].m) * units('s')

##############################
# Next we apply calibrations and units to the data - the calibrations are determined
# based on the sensor used by the experimentalist. Listing the current data column names
# tells us what they are called in the look file.

data.keys()

##############################

# Calibration factor (with final units) for each instrumented channel
calibrations = {'Vert_Disp': 0.076472 * units('micron / bit'),
                'Vert_Load': 1.597778959e-3 * units('MPa / bit'),
                'Hor_Disp': 0.11017176 * units('micron / bit'),
                'Hor_Load.': 3.31712805707e-3 * units('MPa / bit')}

for channel, factor in calibrations.items():
    data[channel] = data[channel] * factor
예제 #18
0
def read_binary(filename,
                data_endianness=None,
                unrecognized_units='ignore',
                clean_header=True):
    """
    Read a look binary formatted file into a dictionary of united arrays.

    Parameters
    ----------
    filename : string or pathlib.Path
        Filename or path to file to read
    data_endianness: string
        Endianness of the data section of the file. None, 'big', or 'little'.
        None interprets the file as it believes fit, big and little force the
        endianness.
    unrecognized_units : string
        'ignore' (default) assigns dimensionless to unrecognized units, 'error' will
        fail if unrecognized units are encountered.
    clean_header : boolean
        Remove extra whitespace in the header data column names and units. Default True.

    Returns
    -------
    data : dict
        Dictionary of `pint.Quantity` arrays for each column of data.
    metadata : dict
        Metadata from the header of the file

    Raises
    ------
    ValueError
        If the bytes per data point is not 4 or 8, or if ``data_endianness``
        is not 'little' or 'big'.

    Notes
    -----
    The data section of the file is written in the native format of the machine
    used to produce the file.  Endianness of data is little by default, but may
    be changed to 'big' to accommodate older files or files written on power pc
    chips.
    """
    if isinstance(filename, str):
        filename = Path(filename)

    if data_endianness is None:
        data_endianness = 'little'

    metadata = _read_binary_file_metadata(filename, clean_header=clean_header)

    # 4-byte-per-point files are always treated as big endian
    if metadata['bytes per data point'] == 4:
        data_endianness = 'big'

    # Work out the struct format for a single data point up front (also a bug
    # fix: these ValueErrors were previously constructed but never raised).
    if metadata['bytes per data point'] == 8:
        type_code = 'd'
    elif metadata['bytes per data point'] == 4:
        type_code = 'f'
    else:
        raise ValueError('Bytes per data must be 4 or 8. Got'
                         f" {metadata['bytes per data point']}")

    if data_endianness == 'little':
        data_point_format = '<' + type_code
    elif data_endianness == 'big':
        data_point_format = '>' + type_code
    else:
        raise ValueError(
            'Data endian setting invalid - options are little and big')

    with open(filename, 'rb') as f:

        # Seek past the file metadata header that we have already processed
        f.seek(36)

        col_headings = []
        col_recs = []
        col_units = []

        # For each possible column (32 maximum columns) unpack its header
        # information and store it.  Only store column headers of columns
        # that contain data.  Use termination at first NULL.
        for _ in range(metadata['header format']):
            # Channel name (13 characters)
            chname = struct.unpack('13c', f.read(13))
            chname = _binary_tuple_to_string(chname)
            chname = chname.split('\0')[0]

            # Channel units (13 characters)
            chunits = struct.unpack('13c', f.read(13))
            chunits = _binary_tuple_to_string(chunits)
            chunits = chunits.split('\0')[0]

            # This field is now unused, so we just read past it (int)
            _ = struct.unpack('>i', f.read(4))

            # This field is now unused, so we just read past it (50 characters)
            _ = struct.unpack('50c', f.read(50))

            # Number of elements (int)
            nelem = struct.unpack('>i', f.read(4))
            nelem = int(nelem[0])

            if clean_header:
                chname = chname.strip()
                chunits = chunits.strip()

            # Skip blank channels
            if chname.startswith('no_val'):
                continue

            col_headings.append(chname)
            col_recs.append(nelem)
            col_units.append(chunits)

        # Read the data into a 2D numpy array (records x columns)
        data = np.empty(
            [metadata['number of records'], metadata['number of columns']])

        bytes_per_point = metadata['bytes per data point']
        for col in range(metadata['number of columns']):
            for row in range(col_recs[col]):
                data[row, col] = struct.unpack(
                    data_point_format, f.read(bytes_per_point))[0]

    data_dict = {}
    data_dict['rec_num'] = np.arange(
        metadata['number of records']) * units('dimensionless')

    # Attach units to each column, falling back to dimensionless (with a
    # warning) or erroring on unknown units depending on unrecognized_units.
    for i, (name, unit) in enumerate(zip(col_headings, col_units)):
        data_unit = units('dimensionless')
        try:
            data_unit = units(unit)

        except UndefinedUnitError:
            if unrecognized_units == 'ignore':
                warnings.warn(
                    f'Unknown unit {unit} - assigning dimensionless units.')
            else:
                raise UndefinedUnitError(unit)

        data_dict[name] = data[:, i] * data_unit

    return data_dict, metadata
예제 #19
0
# The data are currently in a list of arrays in the object, but we want to get the same data
# structure we work with when dealing with data in pure Python - a dictionary of quantity
# arrays! That can be tricky because we need to assign units which are sometimes misspelled
# or just odd. The `get_data_dict` method will do its best, but ultimately fail with unknown
# units. With the `ignore_unknown_units` argument set to `True` it will warn and assign
# dimensionless to anything it doesn't understand. You can also manually specify units for
# all columns, but it is generally easier to fix it up later in practice.

d = look.get_data_dict(ignore_unknown_units=True)

##############################
# Import our unit registry and fix up the bad units to microns as they should have been.
from pylook.units import units

# Fix up the bad unit name by attaching microns to the elastic-corrected displacement,
# which came through as dimensionless above
d['ec_disp'] = d['ec_disp'] * units('micron')

##############################
# We'll use Bokeh to take a quick look at the data. Matplotlib is the best choice for your
# publication plots, but the speed and interactivity of Bokeh in the notebook is hard to beat.
# We'll be adding helpers to pylook to make this process easier in the future as well.

# We need to do some imports from bokeh and turn on the notebook backend.

from bokeh.io import output_notebook
from bokeh.layouts import gridplot
from bokeh.plotting import figure, show

# Route Bokeh output to inline cells in the notebook
output_notebook()

##############################