Example #1
    def test_attrs_pre_eq_step(self):
        self.assertEqual(self.model.name, 'Pre-Equilibration')
        self.assertIsInstance(self.model.solutions, list)
        expected_flow_rate = UnitScalar(200, units=cm_per_hr)
        self.assertEqual(self.model.flow_rate, expected_flow_rate)
        expected_volume = UnitScalar(1.5, units=column_volumes)
        self.assertEqual(self.model.volume, expected_volume)
    def _build_performance_data(self, perf_data, name):
        """
        FIXME: Needed for the design space plot when we display that
        information while changing the start and stop for taking the pool.
        We probably need a Pool class that knows how to compute that data.
        """

        if perf_data is None:
            return

        product = self.product
        product_concentration = UnitScalar(perf_data["pool_concentration"],
                                           units='g/L')
        pool_volume = UnitScalar(perf_data["pool_volume"], units="CV")
        pH = UnitScalar(perf_data["pH"], units="")
        conductivity = UnitScalar(perf_data["conductivity"], units='mS/cm')
        step_yield = UnitScalar(perf_data["step_yield"], units='%')

        # Build product purity assay data based on product assays
        prod_comp_assays = perf_data.pop('product_component_assay_dict')
        product_assay_values = []
        for assay_name in product.product_component_assays:
            if assay_name == STRIP_COMP_NAME:
                assay_fraction = 0.0
            else:
                assay_fraction = prod_comp_assays[assay_name]

            product_assay_values.append(assay_fraction)

        product_assay_values = UnitArray(product_assay_values, units='g/L')

        # Build impurity assay data based on product impurity assays
        impurity_comp_assays = perf_data.pop('impurity_assay_dict')
        impurity_assay_values = []
        for assay_name in product.impurity_assays:
            impurity_assay_values.append(impurity_comp_assays[assay_name])

        impurity_assay_values = UnitArray(impurity_assay_values, units="%")

        pool = SolutionWithProduct(
            name='{}_Pool'.format(name),
            source="Experiment",
            lot_id="unknown",
            solution_type="Pool",
            product=self.product,
            product_concentration=product_concentration,
            product_component_assay_values=product_assay_values,
            impurity_assay_values=impurity_assay_values,
            pH=pH,
            conductivity=conductivity,
        )

        performance_data = PerformanceData(
            name='{}_Performance_Data'.format(name),
            pool_volume=pool_volume,
            pool=pool,
            step_yield=step_yield)

        return performance_data
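
# The assay-building loop above preserves the product's assay ordering and
# zeroes out the strip component. A minimal standalone sketch of that pattern
# (hedged: assay names and values below are hypothetical, and explicit scimath
# unit objects stand in for kromatography's 'g/L' unit string):
from scimath.units.api import UnitArray
from scimath.units.mass import gram
from scimath.units.volume import liter

STRIP_COMP_NAME = "Strip"  # assumed sentinel name, mirroring the code above

# Hypothetical assay ordering defined by the product, and measured values:
product_component_assays = ["Acidic_2", "Acidic_1", "Native", STRIP_COMP_NAME]
prod_comp_assays = {"Acidic_2": 0.12, "Acidic_1": 0.23, "Native": 0.65}

# Keep the product's ordering; the strip component has no measured assay:
product_assay_values = UnitArray(
    [0.0 if name == STRIP_COMP_NAME else prod_comp_assays[name]
     for name in product_component_assays],
    units=gram / liter,
)
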
Example #3
def test_offset_unit_computations():
    """ Execute some basic computations with a custom unit that has an offset.
    """
    my_u = unit(12, m.derivation, 14)
    s1 = UnitScalar(3, units=my_u)
    s2 = UnitScalar(5, units=my_u)
    s3 = s1 + s2
    assert_equal(s3, UnitScalar(8, units=my_u))
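
# The test above only exercises addition between scalars that share the same
# offset unit. A follow-up sketch, under stated assumptions (it presumes
# `convert` is importable from scimath.units.api and redefines the custom unit
# so the snippet stands alone), shows how such a unit relates to its base unit:
from scimath.units.api import convert
from scimath.units.length import meter as m
from scimath.units.unit import unit

my_u = unit(12, m.derivation, 14)  # scale factor 12, offset 14, meter-based

# convert() maps raw magnitudes between units, taking both the scale factor
# and the offset of the custom unit into account:
print(convert(3.0, my_u, m))
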
    def test_view_model_with_no_solution_step_and_no_ds(self):
        # This situation can happen for now since the central pane views are
        # not being passed a datasource. Remove test once datasource passed to
        # central pane views.
        data = {
            'step_type': 'Gradient Elution',
            'name': 'Gradient Elution',
            'flow_rate': UnitScalar(100.0, units=cm_per_hr),
            'volume': UnitScalar(8.4, units=column_volumes)
        }
        no_sol_step = MethodStep(**data)
        self.model.method_steps.append(no_sol_step)
        view = MethodModelView(model=self.model)
        ui = view.edit_traits()
        ui.dispose()
    def _build_continuous_data(self, akta_fname, target_experiment=None):
        """ Load all timeseries from AKTA file. These files typically contain a
        time_** and a ** column. They are stored together in a XYData object.

        Parameters
        ----------
        akta_fname : str
            Filename for AKTA file.

        target_experiment : Experiment
            Experimental method implemented to compare output data to method
            data.

        Returns
        -------
        continuous_data : dict
            Dictionary of XYData with all timeseries contained.

        import_settings : dict
            Settings used to read the AKTA file. Contains the time shift, if
            any, and the regex used to select file columns for each dataset
            type.
        """
        from kromatography.ui.akta_file_setting_selector import \
            AKTAFileSettingSelector

        import_settings = {
            "akta_fname": akta_fname,
            "time_of_origin": UnitScalar(0., units="min")
        }

        settings_selector = AKTAFileSettingSelector(
            akta_filepath=akta_fname, target_experiment=target_experiment)
        if self.allow_akta_gui:
            ui = settings_selector.edit_traits(kind="livemodal")
            settings_selected = ui.result
            if settings_selected:
                # Optionally modify some parameters of the experiment method to
                # reconcile mass balance discrepancies:
                settings_selector.apply_strategy()
        else:
            settings_selected = True

        if settings_selected:
            import_settings["time_of_origin"] = \
                settings_selector.time_of_origin
            import_settings["col_name_patterns"] = \
                settings_selector.col_name_patterns
            continuous_data = continuous_data_from_akta(
                import_settings, target_experiment)
        else:
            msg = "AKTA settings window cancelled: there won't be any " \
                  "continuous data loaded from {}".format(akta_fname)
            logger.warning(msg)
            continuous_data = {}

        return continuous_data, import_settings
def dimensionless_test():
    """
    Test the modifications to division, multiplication and pow, such that a
    dimensionless quantity formed by them is indeed dimensionless.
    """

    a = UnitScalar(1.0, units='m')
    b = UnitScalar(2.0, units='mm')
    d = UnitScalar(2.0, units='m**(-1)')

    c = a / b
    e = b * d

    f = UnitScalar(2.0, units=dimensionless)
    g = f**2

    assert_equal(c.units, dimensionless)
    assert_equal(e.units, dimensionless)
    assert_equal(g.units, dimensionless)
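
# A UnitScalar is a 0-d ndarray subclass, so its bare magnitude can be pulled
# out with float(); the fraction-building example further below relies on this
# via float(total_concentration). A tiny sketch continuing the dimensionless
# case (assuming UnitScalar and dimensionless import from scimath.units.api):
from scimath.units.api import UnitScalar, dimensionless

f = UnitScalar(2.0, units=dimensionless)
g = f ** 2

assert float(g) == 4.0
assert g.units == dimensionless
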
    def _build_experiment_output(self, name, target_experiment=None):
        """ Build the experiment results for a given experiment name.

        Parameters
        ----------
        name : str
            Name of the experiment to extract the results from.

        target_experiment : Experiment
            Experiment implemented to compare output data to method data.

        Returns
        -------
        ExperimentResults
            Result object containing fraction data, and all continuous data. If
            no data is specified, the returned Result object contains empty
            dictionaries for the missing data (continuous and/or fraction).
        """
        expt_data = self.excel_data['experiment_data'][name]

        # initialize the continuous data from the AKTA files.
        akta_fname = expt_data['continuous_data']
        if akta_fname is not None:
            # Assumes that the akta file path is relative to the Excel file:
            dir_name = dirname(self.excel_reader.file_path)
            akta_fname = join(dir_name, basename(akta_fname))
            continuous_data, settings = self._build_continuous_data(
                akta_fname, target_experiment)
        else:
            zero_min = UnitScalar(0., units="minute")
            continuous_data = {}
            settings = {"time_of_origin": zero_min, "holdup_volume": zero_min}

        fraction_data = self._build_fraction_data(
            expt_data['fraction_data'],
            time_of_origin=settings["time_of_origin"],
            target_experiment=target_experiment)

        # Initialize the experiment performance results
        raw_perf_data = expt_data['performance_parameter_data']
        if raw_perf_data:
            performance_data = self._build_performance_data(
                raw_perf_data, name)
        else:
            performance_data = None

        results = ExperimentResults(name=name,
                                    fraction_data=fraction_data,
                                    continuous_data=continuous_data,
                                    performance_data=performance_data,
                                    import_settings=settings)
        return results
Example #8
    def _get_anion_concentration(self):
        # Note that all ionic components are currently stored as
        # `chemical_components`.

        # Pool instances from simulations will not have this
        if self.chemical_component_concentrations is None:
            return None

        components = self.chemical_components
        concentrations = self.chemical_component_concentrations
        anion_concs = [
            conc for comp, conc in zip(components, concentrations.tolist())
            if comp.charge < 0
        ]
        return UnitScalar(sum(anion_concs), units=concentrations.units)
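
# The property above filters component concentrations by charge sign and
# re-wraps the sum with the concentration array's own units. A standalone
# sketch of that pattern, with hypothetical stand-ins for the chemical
# components (the real code uses kromatography component models):
from collections import namedtuple

from scimath.units.api import UnitArray, UnitScalar
from scimath.units.mass import gram
from scimath.units.volume import liter

Component = namedtuple("Component", ["name", "charge"])
components = [Component("Sodium", +1), Component("Chloride", -1)]

# Concentrations aligned with `components`; the units ride along on the sum:
concentrations = UnitArray([3.2, 3.7], units=gram / liter)
anion_concs = [
    conc for comp, conc in zip(components, concentrations.tolist())
    if comp.charge < 0
]
anion_concentration = UnitScalar(sum(anion_concs), units=concentrations.units)
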
Example #9
def unit_array_units_converter(unit_array, new_units):
    """ Convert a UnitArray from one set of units to another.
    """
    if unit_array.units != new_units:
        # Need conversion.
        if isinstance(unit_array, ndarray) and unit_array.shape != ():
            # this is an array
            result = UnitArray(units.convert(unit_array.view(ndarray),
                                             unit_array.units, new_units))
        else:
            # this is a scalar
            result = UnitScalar(units.convert(unit_array.view(ndarray),
                                              unit_array.units, new_units))
        result.units = new_units
    else:
        # No conversion needed.  Just return the unit_array.
        result = unit_array

    return result
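
# A usage sketch for the converter above. It assumes unit_array_units_converter
# and its module-level dependencies (numpy's ndarray and scimath's
# units.convert) are in scope, together with scimath's standard length units:
from scimath.units.api import UnitArray
from scimath.units.length import centimeter, meter

lengths_cm = UnitArray([100.0, 250.0], units=centimeter)
lengths_m = unit_array_units_converter(lengths_cm, meter)   # [1.0, 2.5] m

# When the target units already match, the original object is returned as is:
assert unit_array_units_converter(lengths_cm, centimeter) is lengths_cm
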
def array_to_unit_array_converter(array, units):
    """ Create a UnitArray with units='units' from the given 'array'.
    """
    if array.shape == ():
        return UnitScalar(array, units=units)
    return UnitArray(array, units=units)
def scalar_to_unit_scalar_converter(x, units):
    """ Create a UnitScalar with units='units' from the given scalar 'x'.
    """

    return UnitScalar(x, units=units)
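
# A quick sketch of the two small wrappers above: 0-d arrays are routed to
# UnitScalar, everything else to UnitArray (values below are made up):
import numpy as np

from scimath.units.length import meter

length_scalar = array_to_unit_array_converter(np.array(3.0), meter)
length_array = array_to_unit_array_converter(np.array([1.0, 2.0]), meter)
unit_scalar = scalar_to_unit_scalar_converter(200.0, meter)
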
Example #12
    def _pH_default(self):
        return UnitScalar(0.0, units="1")
Example #13
    def _conductivity_default(self):
        return UnitScalar(0.0, units="mS/cm")
from scimath.units.api import UnitScalar

from kromatography.model.product import Product
from kromatography.model.product_component import ProductComponent
from kromatography.model.method_step import MethodStep
from kromatography.model.solution_with_product import SolutionWithProduct
from kromatography.utils.chromatography_units import (
    column_volumes, cm_per_hr, extinction_coefficient_unit, fraction,
    gram_per_liter, gram_per_milliliter, gram_per_mol, assay_unit,
    kg_per_liter, mS_per_cm, milli_molar, milliliter, ml_per_min, molar)
from kromatography.model.factories.product import add_strip_to_product

RESIN_DATA = {
    'lot_id': 'Lot123456',
    'name': 'Fractogel-SO3 (M)',
    'ligand': 'SO2',
    'resin_type': 'CEX',
    'average_bead_diameter': UnitScalar(65.0, units='um'),
    'ligand_density': UnitScalar(75.0, units=milli_molar),
    'settled_porosity': UnitScalar(0.42, units=fraction),
}

# Chemical components
COMPONENT_DATA = {
    'name': 'Sodium',
    'charge': UnitScalar(1, units='1'),
    'pKa': UnitScalar(0.0, units='1')
}

COMPONENT2_DATA = {
    'name': 'Chloride',
    'charge': UnitScalar(-1, units='1'),
    'pKa': UnitScalar(0.0, units='1')
}

    def _build_fraction_data(self,
                             fraction_data,
                             time_of_origin=None,
                             target_experiment=None):
        """ Convert fraction data from Excel fraction sheet into a fraction
        data dictionary to describe ChromatographyResults.

        Parameters
        ----------
        fraction_data : dict
            Data loaded from Excel.

        time_of_origin : UnitScalar [OPTIONAL]
            User-specified custom time shift to apply to the data, if the
            experimental output wasn't recorded starting at the desired
            origin.

        target_experiment : Experiment
            Experiment implemented to compare output data to method data.
        """
        if fraction_data is None:
            return {}

        if time_of_origin is None:
            time_of_origin = UnitScalar(0., units="minute")

        # Fraction times at which product is sampled and analyzed:
        frac_time = []
        # Component concentration at each fraction time:
        frac_comp_conc = defaultdict(list)
        # Total product concentration, corrected for extinction, at each
        # fraction time:
        frac_prod_absorbances = []

        product = self.product
        for fraction in fraction_data:
            prod_comp_assays = fraction.pop('product_component_assay_dict')
            # FIXME: read units instead of guessing
            total_concentration = self._get_unitted_value(
                'fraction_product_concentration',
                fraction['total_concentration'])
            conc_units = total_concentration.units

            # To compute the product concentrations in the fraction, we must
            # first compute the concentration of each product component, then
            # multiply each by its respective extinction coefficient, and
            # finally sum these values together.

            # FIXME: Fraction component concentrations should be calculated in
            #        SolutionWithProduct. This is probably unnecessary --
            #        look into later.
            namespace = {}
            namespace.update(prod_comp_assays)
            namespace["product_concentration"] = float(total_concentration)
            prod_comp_conc = [
                eval(expression, namespace)
                for expression in product.product_component_concentration_exps
            ]

            frac_component_concs = UnitArray(prod_comp_conc, units=conc_units)
            # Total concentration, corrected by the components' extinction coef
            ext_coefs = [
                comp.extinction_coefficient
                for comp in product.product_components
            ]
            ext_coef_array = unitted_list_to_array(ext_coefs)
            # This converts the component concentration to absorption
            frac_component_absorb = frac_component_concs * ext_coef_array
            tot_prod_absorbance = sum(frac_component_absorb)
            frac_prod_absorbances.append(tot_prod_absorbance)

            frac_time.append(fraction['time'])
            for ii, comp_name in enumerate(product.product_component_names):
                frac_comp_conc[comp_name].append(frac_component_absorb[ii])

        # Apply the user defined origin shift:
        # Warning: this leads to all fraction XYData sharing the same array!
        frac_time = np.array(frac_time, dtype='float64') - time_of_origin[()]
        frac_prod_absorbances = np.array(frac_prod_absorbances,
                                         dtype='float64')

        fraction_outputs = {}
        for key, data in frac_comp_conc.items():
            fraction_outputs[key] = XYData(
                name=key,
                x_data=frac_time,
                # FIXME: read units instead of guessing
                x_metadata={
                    "time_of_origin": time_of_origin,
                    "units": "min"
                },
                y_data=data,
                # FIXME: read units instead of guessing
                y_metadata={
                    "Description": "Absorbances for product "
                                   "components {}".format(key),
                    "units": "AU/cm"
                },
            )

        fraction_outputs[FRACTION_TOTAL_DATA_KEY] = XYData(
            name=FRACTION_TOTAL_DATA_KEY,
            x_data=frac_time,
            # FIXME: read units instead of guessing
            x_metadata={
                "time_of_origin": time_of_origin,
                "units": "min"
            },
            y_data=frac_prod_absorbances,
            # FIXME: read units instead of guessing
            y_metadata={
                "Description": "Sum of absorbances per unit path "
                "length for all product components.",
                "units": "AU/cm"
            },
        )

        shift_fraction_data_by_holdup_vol(fraction_outputs, target_experiment)
        return fraction_outputs
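
# The core numeric step above turns per-component concentrations into
# absorbances by multiplying with the extinction coefficients and summing.
# A self-contained sketch of just that step, with made-up numbers and explicit
# scimath units standing in for kromatography's unitted_list_to_array helper:
import numpy as np

from scimath.units.api import UnitArray, UnitScalar
from scimath.units.length import centimeter
from scimath.units.mass import gram
from scimath.units.volume import liter

# Hypothetical per-component concentrations in one fraction:
comp_concs = UnitArray([0.80, 0.15, 0.05], units=gram / liter)
# Hypothetical extinction coefficients (absorbance per unit concentration and
# unit path length) for the same components:
ext_coefs = UnitArray([1.2, 0.9, 1.5], units=liter / (gram * centimeter))

# Element-wise product: each component's absorbance per cm of path length.
comp_absorbances = comp_concs * ext_coefs
# Total absorbance of the fraction, keeping the combined units:
total_absorbance = UnitScalar(np.asarray(comp_absorbances).sum(),
                              units=comp_absorbances.units)
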
Example #16
    def test_construction_gradient_elution_step(self):
        step = MethodStep(**GRADIENT_ELUTION_STEP)
        self.assertEqual(step.name, 'Gradient Elution')
        self.assertEqual(len(step.solutions), 2)
        self.assertEqual(step.flow_rate, UnitScalar(100, units=cm_per_hr))
        self.assertEqual(step.volume, UnitScalar(8.4, units=column_volumes))