class ScatterplotView(Base2DView):
    """
    Plots a 2-d scatterplot.  
    
    Attributes
    ----------
    
    Examples
    --------
    
    Make a little data set.
    
    .. plot::
        :context: close-figs
            
        >>> import cytoflow as flow
        >>> import_op = flow.ImportOp()
        >>> import_op.tubes = [flow.Tube(file = "Plate01/RFP_Well_A3.fcs",
        ...                              conditions = {'Dox' : 10.0}),
        ...                    flow.Tube(file = "Plate01/CFP_Well_A4.fcs",
        ...                              conditions = {'Dox' : 1.0})]
        >>> import_op.conditions = {'Dox' : 'float'}
        >>> ex = import_op.apply()
        
    Plot the scatterplot
    
    .. plot::
        :context: close-figs
    
        >>> flow.ScatterplotView(xchannel = 'V2-A',
        ...                      xscale = 'log',
        ...                      ychannel = 'Y2-A',
        ...                      yscale = 'log',
        ...                      huefacet = 'Dox').plot(ex)
        
    """

    id = Constant('edu.mit.synbio.cytoflow.view.scatterplot')
    friendly_id = Constant("Scatter Plot")

    def plot(self, experiment, **kwargs):
        """
        Plot a faceted scatter plot view of two channels
        
        Parameters
        ----------
        
        alpha : float (default = 0.25)
            The alpha blending value, between 0 (transparent) and 1 (opaque).
            
        s : int (default = 2)
            The size in points^2.
            
        marker : a matplotlib marker style, usually a string
            Specifies the glyph to draw for each point on the scatterplot.
            See `matplotlib.markers`_ for examples.  Default: 'o'
            
        .. _matplotlib.markers: http://matplotlib.org/api/markers_api.html#module-matplotlib.markers
        

        Notes
        -----
        Other ``kwargs`` are passed to `matplotlib.pyplot.scatter <https://matplotlib.org/devdocs/api/_as_gen/matplotlib.pyplot.scatter.html>`_
  
        
        """

        super().plot(experiment, **kwargs)

    def _grid_plot(self, experiment, grid, xlim, ylim, xscale, yscale,
                   **kwargs):

        kwargs.setdefault('alpha', 0.25)
        kwargs.setdefault('s', 2)
        kwargs.setdefault('marker', 'o')
        kwargs.setdefault('antialiased', True)

        grid.map(plt.scatter, self.xchannel, self.ychannel, **kwargs)

        return {}
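
# Usage sketch for the plot() kwargs documented above (a sketch, not part of
# the class -- it assumes the Experiment `ex` built in the docstring examples,
# and the styling values are illustrative).  Extra kwargs are forwarded to
# matplotlib.pyplot.scatter, so the defaults set in _grid_plot() can be
# overridden per call:
#
#     flow.ScatterplotView(xchannel = 'V2-A',
#                          xscale = 'log',
#                          ychannel = 'Y2-A',
#                          yscale = 'log',
#                          huefacet = 'Dox').plot(ex, alpha = 0.5, s = 10, marker = '+')
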
class MATS3DMicroplaneDamageWu(MATSXDMicroplaneDamageFatigueWu, MATS3DEval):

    #-----------------------------------------------
    # number of microplanes - currently fixed for 3D
    #-----------------------------------------------
    n_mp = Constant(28)

    #-----------------------------------------------
    # get the normal vectors of the microplanes
    #-----------------------------------------------
    _MPN = Property(depends_on='n_mp')

    @cached_property
    def _get__MPN(self):
        # microplane normals:
        return array([[.577350259, .577350259, .577350259],
                      [.577350259, .577350259, -.577350259],
                      [.577350259, -.577350259, .577350259],
                      [.577350259, -.577350259, -.577350259],
                      [.935113132, .250562787, .250562787],
                      [.935113132, .250562787, -.250562787],
                      [.935113132, -.250562787, .250562787],
                      [.935113132, -.250562787, -.250562787],
                      [.250562787, .935113132, .250562787],
                      [.250562787, .935113132, -.250562787],
                      [.250562787, -.935113132, .250562787],
                      [.250562787, -.935113132, -.250562787],
                      [.250562787, .250562787, .935113132],
                      [.250562787, .250562787, -.935113132],
                      [.250562787, -.250562787, .935113132],
                      [.250562787, -.250562787, -.935113132],
                      [.186156720, .694746614, .694746614],
                      [.186156720, .694746614, -.694746614],
                      [.186156720, -.694746614, .694746614],
                      [.186156720, -.694746614, -.694746614],
                      [.694746614, .186156720, .694746614],
                      [.694746614, .186156720, -.694746614],
                      [.694746614, -.186156720, .694746614],
                      [.694746614, -.186156720, -.694746614],
                      [.694746614, .694746614, .186156720],
                      [.694746614, .694746614, -.186156720],
                      [.694746614, -.694746614, .186156720],
                      [.694746614, -.694746614, -.186156720]])

    #-------------------------------------
    # get the weights of the microplanes
    #-------------------------------------
    _MPW = Property(depends_on='n_mp')

    @cached_property
    def _get__MPW(self):
        # Note that the values in the array must be multiplied by 6 (cf. [Baz05])!
        # The sum of the array equals 0.5 (cf. [BazLuz04]).
        # The values are given for a Gaussian integration over the unit
        # hemisphere.
        return array([
            .0160714276, .0160714276, .0160714276, .0160714276, .0204744730,
            .0204744730, .0204744730, .0204744730, .0204744730, .0204744730,
            .0204744730, .0204744730, .0204744730, .0204744730, .0204744730,
            .0204744730, .0158350505, .0158350505, .0158350505, .0158350505,
            .0158350505, .0158350505, .0158350505, .0158350505, .0158350505,
            .0158350505, .0158350505, .0158350505
        ]) * 6.0
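
    # Quick consistency check for the 28-point integration scheme above (a
    # sketch; `m` is assumed to be an instance of this class with default
    # traits):
    #
    #     import numpy as np
    #     m = MATS3DMicroplaneDamageWu()
    #     np.allclose(np.linalg.norm(m._MPN, axis=1), 1.0)  # normals are unit vectors
    #     np.isclose(m._MPW.sum(), 3.0)                     # 6 * 0.5, cf. [BazLuz04]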

    #-------------------------------------------------------------------------
    # Cached elasticity tensors
    #-------------------------------------------------------------------------

    @cached_property
    def _get_elasticity_tensors(self):
        '''
        Initialize the fourth-order elasticity tensor for the 3D case.
        '''
        # ----------------------------------------------------------------------------
        # Lame constants calculated from E and nu
        # ----------------------------------------------------------------------------

        # first Lame parameter
        la = self.E * self.nu / ((1 + self.nu) * (1 - 2 * self.nu))
        # second Lame parameter (shear modulus)
        mu = self.E / (2 + 2 * self.nu)

        # -----------------------------------------------------------------------------------------------------
        # Get the fourth order elasticity and compliance tensors for the 3D-case
        # -----------------------------------------------------------------------------------------------------

        # construct the elasticity tensor (using Numpy - einsum function)
        delta = identity(3)
        D_ijkl = (einsum(',ij,kl->ijkl', la, delta, delta) +
                  einsum(',ik,jl->ijkl', mu, delta, delta) +
                  einsum(',il,jk->ijkl', mu, delta, delta))

        return D_ijkl
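
# Standalone sketch of the isotropic elasticity tensor assembled above,
#   D_ijkl = la * d_ij * d_kl + mu * (d_ik * d_jl + d_il * d_jk),
# together with a check of its minor and major symmetries.  The E and nu
# values are illustrative only.

import numpy as np

E, nu = 34e3, 0.2
la = E * nu / ((1. + nu) * (1. - 2. * nu))  # first Lame parameter
mu = E / (2. * (1. + nu))                   # second Lame parameter (shear modulus)
delta = np.identity(3)
D_ijkl = (la * np.einsum('ij,kl->ijkl', delta, delta) +
          mu * (np.einsum('ik,jl->ijkl', delta, delta) +
                np.einsum('il,jk->ijkl', delta, delta)))
assert np.allclose(D_ijkl, D_ijkl.transpose(1, 0, 2, 3))  # minor symmetry (i,j)
assert np.allclose(D_ijkl, D_ijkl.transpose(2, 3, 0, 1))  # major symmetry (ij),(kl)
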
class MATSEval(HasStrictTraits, TStepperEval):

    implements(IMATSEval)

    # Callable specifying spatial profile of an initial strain field
    # the parameter is X - global coordinates of the material point
    #
    initial_strain = Callable

    # Callable specifying spatial profile of an initial stress field
    # the parameter is X - global coordinates of the material point
    #
    initial_stress = Callable

    # number of spatial dimensions of an integration cell for the material model
    #
    n_dims = Constant(Float)

    id_number = Int

    #-------------------------------------------------------------------------
    # Dimensionally dependent mappings between tensors
    #-------------------------------------------------------------------------
    # These are the handbook methods to be specialized in subclasses.

    # Mappings between tensorial and engineering variables
    #
    map_eps_eng_to_mtx = Callable(transient=True)
    map_sig_eng_to_mtx = Callable(transient=True)
    map_eps_mtx_to_eng = Callable(transient=True)
    map_sig_mtx_to_eng = Callable(transient=True)
    compliance_mapping = Callable(transient=True)
    map_tns4_to_tns2 = Callable(transient=True)

    def get_state_array_size(self):
        return 0

    def setup(self, sctx):
        pass

    explorer_rtrace_list = Property

    @cached_property
    def _get_explorer_rtrace_list(self):
        return []

    #-------------------------------------------------------------------------
    # Response trace evaluators
    #-------------------------------------------------------------------------
    def get_msig_pos(self, sctx, eps_app_eng, *args, **kw):
        '''
        Return the eigenvector of the largest positive principal stress,
        scaled by that stress (a zero vector if no principal stress is positive).
        @param sctx:
        @param eps_app_eng:
        '''
        sig_eng, D_mtx = self.get_corr_pred(sctx, eps_app_eng, 0, 0, 0)
        ms_vct = zeros(3)
        shape = sig_eng.shape[0]
        if shape == 3:
            s_mtx = self.map_sig_eng_to_mtx(sig_eng)
            m_sig, m_vct = linalg.eigh(s_mtx)

            # @todo: - this must be written in a more readable way
            #
            if m_sig[-1] > 0:
                # multiply the largest positive stress by its eigenvector
                # (the eigenvectors are the columns of m_vct)
                ms_vct[:2] = m_sig[-1] * m_vct[:, -1]
        elif shape == 6:
            s_mtx = self.map_sig_eng_to_mtx(sig_eng)
            m_sig, m_vct = linalg.eigh(s_mtx)
            if m_sig[-1] > 0:
                # multiply the largest positive stress by its eigenvector
                ms_vct = m_sig[-1] * m_vct[:, -1]
        return ms_vct

    def get_msig_pm(self, sctx, eps_app_eng, *args, **kw):
        sig_eng, D_mtx = self.get_corr_pred(sctx, eps_app_eng, 0, 0, 0)
        t_field = zeros(9)
        shape = sig_eng.shape[0]
        if shape == 3 or shape == 6:
            s_mtx = self.map_sig_eng_to_mtx(sig_eng)
            m_sig, m_vct = linalg.eigh(s_mtx)
            if m_sig[-1] > 0:
                t_field[0] = m_sig[-1]  # largest positive principal stress
        return t_field

    def get_max_principle_sig(self, sctx, eps_app_eng, *args, **kw):
        '''
        Get the largest principal stress.
        @param sctx:
        @param eps_app_eng:
        '''
        sig_eng, D_mtx = self.get_corr_pred(sctx, eps_app_eng, 0, 0, 0)
        s_mtx = self.map_sig_eng_to_mtx(sig_eng)
        m_sig, m_vct = linalg.eigh(s_mtx)
        max_principle_sig = max(m_sig[:])
        return array([max_principle_sig], dtype='float')

    def get_sig_app(self, sctx, eps_app_eng, *args, **kw):
        sig_eng, D_mtx = self.get_corr_pred(sctx, eps_app_eng, 0, 0, 0, *args,
                                            **kw)
        s_tensor = zeros((3, 3))
        s_tensor[:self.n_dims, :self.n_dims] = self.map_sig_eng_to_mtx(sig_eng)
        return s_tensor

    def get_eps_app(self, sctx, eps_app_eng, *args, **kw):
        e_tensor = zeros((3, 3))
        e_tensor[:self.n_dims, :self.n_dims] = self.map_eps_eng_to_mtx(
            eps_app_eng)
        return e_tensor

    # This is only relevant for strain softening models
    #
    def get_regularizing_length(self, X_mtx, eps_app_eng, *args, **kw):

        eigval, eigvec = eigh(self.map_eps_eng_to_mtx(eps_app_eng))

        # Get the eigenvector associated with the maximum eigenvalue;
        # it is located in the last column of the matrix
        # of eigenvectors eigvec
        #
        eps_one = eigvec[:, -1]
        # Project the coordinate vectors into the determined direction
        #
        proj = dot(X_mtx, eps_one)

        # Find the maximum distance between the projected coordinates
        #
        h = max(proj) - min(proj)
        return h

    def get_strain_energy(self, sctx, eps_app_eng, *args, **kw):
        sig_app = self.get_sig_app(sctx, eps_app_eng)
        eps_app = self.get_eps_app(sctx, eps_app_eng)
        energy = tensordot(sig_app, eps_app) * 0.5
        return energy

    # Declare and fill-in the explorer config
    # Each material model can define the default configuration
    # to present itself in the explorer.
    #
    explorer_config = Property(Dict)

    @cached_property
    def _get_explorer_config(self):
        from ibvpy.api import BCDof, TLine, RTraceGraph
        return {
            'bcond_list':
            [BCDof(var='u', dof=0, value=0.01, time_function=lambda t: t)],
            'rtrace_list': [
                RTraceGraph(name='strain - stress',
                            var_x='eps_app',
                            idx_x=0,
                            var_y='sig_app',
                            idx_y=0,
                            record_on='update'),
                RTraceGraph(name='strain - strain',
                            var_x='eps_app',
                            idx_x=0,
                            var_y='eps_app',
                            idx_y=1,
                            record_on='update'),
                RTraceGraph(name='stress - stress',
                            var_x='sig_app',
                            idx_x=0,
                            var_y='sig_app',
                            idx_y=1,
                            record_on='update'),
                RTraceGraph(name='Stress - Strain',
                            var_x='F_int',
                            idx_x=0,
                            var_y='U_k',
                            idx_y=0,
                            record_on='update'),
                RTraceGraph(name='Strain - Strain',
                            var_x='U_k',
                            idx_x=0,
                            var_y='U_k',
                            idx_y=1,
                            record_on='update'),
                RTraceGraph(name='Stress - Stress',
                            var_x='F_int',
                            idx_x=0,
                            var_y='F_int',
                            idx_y=1,
                            record_on='update'),
                RTraceGraph(name='sig1 - eps1',
                            var_x='F_int',
                            idx_x=0,
                            var_y='U_k',
                            idx_y=0,
                            record_on='update'),
                RTraceGraph(name='sig2 - sig3',
                            var_x='F_int',
                            idx_x=1,
                            var_y='F_int',
                            idx_y=2,
                            record_on='update'),
                RTraceGraph(name='eps2 - eps3',
                            var_x='U_k',
                            idx_x=1,
                            var_y='U_k',
                            idx_y=2,
                            record_on='update')
            ],
            'tline':
            TLine(step=0.1, max=1.0)
        }

    def _set_explorer_config(self, value):
        self._explorer_config = value
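
# Toy sketch of the regularizing-length computation in
# get_regularizing_length() above: project the element coordinates onto the
# direction of the largest principal strain and take the extent of the
# projection.  The coordinates and strain tensor below are made up.

import numpy as np

X_mtx = np.array([[0., 0.], [2., 0.], [2., 1.], [0., 1.]])  # point coordinates
eps_mtx = np.array([[0.01, 0.], [0., -0.002]])              # strain tensor
eigval, eigvec = np.linalg.eigh(eps_mtx)
eps_one = eigvec[:, -1]            # eigenvector of the largest eigenvalue
proj = np.dot(X_mtx, eps_one)      # project coordinates onto that direction
h = proj.max() - proj.min()        # regularizing length; here h == 2.0
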
class ColorTranslationOp(HasStrictTraits):
    """
    Translate measurements from one color's scale to another, using a two-color
    or three-color control.
    
    To use, set up the :attr:`controls` dictionary with the channels to convert
    and the FCS files to compute the mapping.  Call :meth:`estimate` to
    parameterize the module; check that the plots look good by calling the 
    :meth:`~ColorTranslationDiagnostic.plot` method of the 
    :class:`ColorTranslationDiagnostic` instance returned by :meth:`default_view`;
    then call :meth:`apply` to apply the translation to an :class:`.Experiment`.
    
    Attributes
    ----------
    controls : Dict((Str, Str), File)
        Two-color controls used to determine the mapping.  The keys are 
        tuples of **from-channel** and **to-channel**.  The values are FCS files 
        containing two-color constitutive fluorescent expression data 
        for the mapping.
        
    mixture_model : Bool (default = False)
        If ``True``, try to model the **from** channel as a mixture of expressing
        cells and non-expressing cells (as you would get with a transient
        transfection), then weight the regression by the probability that the
        cell is from the top (transfected) distribution.  Make sure you 
        check the diagnostic plots to see that this worked!
        
    linear_model : Bool (default = False)
        Set this to ``True`` to get a scaling that is strictly multiplicative,
        mirroring the TASBE approach.  Do check the diagnostic plot, though,
        to see how well (or poorly) your model fits the data.
        
    control_conditions : Dict((Str, Str), Dict(Str, Any))
        Occasionally, you'll need to specify the experimental conditions that
        the bleedthrough tubes were collected under (to apply the operations in the 
        history.)  Specify them here.  The key is a tuple of channel names; the 
        value is a dictionary of the conditions (the same as you would specify
        for a :class:`~.Tube`).

        
    Notes
    -----
    In the TASBE workflow, this operation happens *after* the application of
    :class:`.AutofluorescenceOp` and :class:`.BleedthroughLinearOp`.  The entire
    operation history of the :class:`.Experiment` that is passed to 
    :meth:`estimate` is replayed on the control files in :attr:`controls`, so
    they are also corrected for autofluorescence and bleedthrough, and have
    metadata for subsetting.
    

    Examples
    --------
    Create a small experiment:
    
    .. plot::
        :context: close-figs
    
        >>> import cytoflow as flow
        >>> import_op = flow.ImportOp()
        >>> import_op.tubes = [flow.Tube(file = "tasbe/mkate.fcs")]
        >>> ex = import_op.apply()
    
    Create and parameterize the operation
    
    .. plot::
        :context: close-figs

        >>> color_op = flow.ColorTranslationOp()
        >>> color_op.controls = {("Pacific Blue-A", "FITC-A") : "tasbe/rby.fcs",
        ...                      ("PE-Tx-Red-YG-A", "FITC-A") : "tasbe/rby.fcs"}
        >>> color_op.mixture_model = True
    
    Estimate the model parameters
    
    .. plot::
        :context: close-figs 
    
        >>> color_op.estimate(ex)
    
    Plot the diagnostic plot
    
    .. plot::
        :context: close-figs

        >>> color_op.default_view().plot(ex)  

    Apply the operation to the experiment
    
    .. plot::
        :context: close-figs
    
        >>> ex = color_op.apply(ex)  
    """

    # traits
    id = Constant('edu.mit.synbio.cytoflow.operations.color_translation')
    friendly_id = Constant("Color translation")

    name = Constant("Color Translation")

    translation = util.Removed(
        err_string=
        "'translation' is removed; the same info is found in 'controls'",
        warning=True)
    controls = Dict(Tuple(Str, Str), File)
    controls_frames = Dict(Tuple(Str, Str), Instance(DataFrame))
    mixture_model = Bool(False)
    linear_model = Bool(False)

    control_conditions = Dict(Tuple(Str, Str), Dict(Str, Any), {})

    # The regression coefficients determined by `estimate()`, used to map
    # colors between channels.  The keys are tuples of (*from-channel*,
    # *to-channel*) (corresponding to key-value pairs in `controls`).  The
    # values are lists of Float, the log-log coefficients for the color
    # translation (determined by `estimate()`).
    # TODO - why can't i make the value List(Float)?
    _coefficients = Dict(Tuple(Str, Str), Any, transient=True)
    _trans_fn = Dict(Tuple(Str, Str), Callable, transient=True)
    _sample = Dict(Tuple(Str, Str), Any, transient=True)
    _means = Dict(Tuple(Str, Str), Tuple(Float, Float), transient=True)

    def estimate(self, experiment, subset=None):
        """
        Estimate the mapping from the two-channel controls
        
        Parameters
        ----------
        experiment : Experiment
            The :class:`.Experiment` used to check the voltages, etc. of the
            control tubes.  Also the source of the operation history that
            is replayed on the control tubes.
            
        subset : Str
            A Python expression used to subset the controls before estimating
            the color translation parameters.
        """

        if experiment is None:
            raise util.CytoflowOpError('experiment', "No experiment specified")

        if not self.controls and not self.controls_frames:
            raise util.CytoflowOpError('controls', "No controls specified")

        self._coefficients.clear()
        self._trans_fn.clear()
        self._sample.clear()
        self._means.clear()

        tubes = {}

        if (self.controls != {}):
            controls = self.controls
        else:
            controls = self.controls_frames

        translation = {x[0]: x[1] for x in list(controls.keys())}

        for from_channel, to_channel in translation.items():

            if from_channel not in experiment.channels:
                raise util.CytoflowOpError(
                    'translation',
                    "Channel {0} not in the experiment".format(from_channel))

            if to_channel not in experiment.channels:
                raise util.CytoflowOpError(
                    'translation',
                    "Channel {0} not in the experiment".format(to_channel))

            if (from_channel, to_channel) not in controls:
                raise util.CytoflowOpError(
                    'translation', "Control file for {0} --> {1} "
                    "not specified".format(from_channel, to_channel))

            tube_file_or_frame = controls[(from_channel, to_channel)]
            tube_file_or_frame_key = (from_channel, to_channel)
            tube_conditions = self.control_conditions[(from_channel, to_channel)] \
                                    if (from_channel, to_channel) in self.control_conditions \
                                    else {}
            conditions = {
                k: experiment.data[k].dtype.name
                for k in tube_conditions.keys()
            }

            if tube_file_or_frame_key not in tubes:
                channels = {
                    experiment.metadata[c]["fcs_name"]: c
                    for c in experiment.channels
                }
                name_metadata = experiment.metadata['name_metadata']
                if (self.controls != {}):
                    # make a little Experiment
                    check_tube(controls[tube_file_or_frame_key], experiment)
                    tube_exp = ImportOp(tubes=[
                        Tube(file=controls[tube_file_or_frame_key],
                             conditions=tube_conditions)
                    ],
                                        conditions=conditions,
                                        channels=channels,
                                        name_metadata=name_metadata).apply()
                else:
                    tube_exp = ImportOp(tubes=[
                        Tube(frame=controls[tube_file_or_frame_key],
                             conditions=tube_conditions)
                    ],
                                        conditions=conditions,
                                        channels=channels,
                                        name_metadata=name_metadata).apply()

                # apply previous operations
                for op in experiment.history:
                    if hasattr(op, 'by'):
                        for by in op.by:
                            if 'experiment' in experiment.metadata[by]:
                                raise util.CytoflowOpError(
                                    'experiment',
                                    "Prior to applying this operation, "
                                    "you must not apply any operation with 'by' "
                                    "set to an experimental condition.")
                    tube_exp = op.apply(tube_exp)

                # subset the events
                if subset:
                    try:
                        tube_exp = tube_exp.query(subset)
                    except Exception as e:
                        raise util.CytoflowOpError(
                            'subset', "Subset string '{0}' isn't valid".format(
                                subset)) from e

                    if len(tube_exp.data) == 0:
                        raise util.CytoflowOpError(
                            'subset',
                            "Subset string '{0}' returned no events".format(
                                subset))

                tube_data = tube_exp.data

                tubes[tube_file_or_frame_key] = tube_data

            data = tubes[tube_file_or_frame_key][[from_channel,
                                                  to_channel]].copy()
            data = data[data[from_channel] > 0]
            data = data[data[to_channel] > 0]

            data.reset_index(drop=True, inplace=True)

            self._sample[(from_channel,
                          to_channel)] = data.sample(n=min(len(data), 100))

            data[from_channel] = np.log10(data[from_channel])
            data[to_channel] = np.log10(data[to_channel])

            if self.mixture_model:
                gmm = sklearn.mixture.BayesianGaussianMixture(n_components=2,
                                                              random_state=1)
                fit = gmm.fit(data)

                self._means[(from_channel, to_channel)] = \
                    (10 ** fit.means_[0][0], 10 ** fit.means_[1][0])

                # pick the component with the maximum mean
                idx = 0 if fit.means_[0][0] > fit.means_[1][0] else 1
                weights = [x[idx] for x in fit.predict_proba(data)]
            else:
                weights = [1] * len(data.index)

            if self.linear_model:
                # this mimics the TASBE approach, which constrains the fit to
                # a multiplicative scaling (eg, a linear fit with an intercept
                # of 0.)  I disagree that this is the right approach, which is
                # why it's not the default.

                f = lambda x: weights * (data[to_channel] - x[0] * data[
                    from_channel])
                x0 = [1]

                trans_fn = lambda data, x: np.power(data, x[0])

            else:

                # this code uses a different approach from TASBE. instead of
                # computing a multiplicative scaling constant, it computes a
                # full linear regression on the log-scaled data (ie, allowing
                # the intercept to vary as well as the slope).  this is a
                # more general model of the underlying physical behavior, and
                # fits the data better -- but it may not be more "correct."

                f = lambda x: weights * (data[to_channel] - x[0] * data[
                    from_channel] - x[1])
                x0 = [1, 0]

                trans_fn = lambda data, x: (10**x[1]) * np.power(data, x[0])

            opt = scipy.optimize.least_squares(f, x0)
            self._coefficients[(from_channel, to_channel)] = opt.x
            self._trans_fn[(
                from_channel,
                to_channel)] = lambda data, x=opt.x: trans_fn(data, x)

    def apply(self, experiment):
        """Applies the color translation to an experiment
        
        Parameters
        ----------
        experiment : Experiment
            the :class:`.Experiment` to which this operation is applied
            
        Returns
        -------
        Experiment 
            a new experiment with the color translation applied.  The corrected
            channels also have the following new metadata:
    
            **channel_translation** : Str
            Which channel was this one translated to?
        
            **channel_translation_fn** : Callable (pandas.Series --> pandas.Series)
            The function that translated this channel
        """

        if experiment is None:
            raise util.CytoflowOpError('experiment', "No experiment specified")

        if not self.controls and not self.controls_frames:
            raise util.CytoflowOpError('controls', "No controls specified")

        if not self._trans_fn:
            raise util.CytoflowOpError(
                None, "Transfer functions aren't set. "
                "Did you forget to call estimate()?")

        if (self.controls != {}):
            controls = self.controls
        else:
            controls = self.controls_frames

        translation = {x[0]: x[1] for x in list(controls.keys())}
        from_channels = [x[0] for x in list(controls.keys())]

        for key, val in translation.items():
            if (key, val) not in self._coefficients:
                raise util.CytoflowOpError(
                    None, "Coefficients aren't set for translation "
                    "{} --> {}.  Did you call estimate()?".format(key, val))

        new_experiment = experiment.clone()

        for channel in from_channels:
            new_experiment.data = \
                new_experiment.data[new_experiment.data[channel] > 0]

        for from_channel, to_channel in translation.items():
            trans_fn = self._trans_fn[(from_channel, to_channel)]

            new_experiment[from_channel] = trans_fn(experiment[from_channel])
            new_experiment.metadata[from_channel][
                'channel_translation_fn'] = trans_fn
            new_experiment.metadata[from_channel][
                'channel_translation'] = to_channel

        new_experiment.history.append(
            self.clone_traits(transient=lambda _: True))

        return new_experiment

    def default_view(self, **kwargs):
        """
        Returns a diagnostic plot to see if the bleedthrough spline estimation
        is working.
        
        Returns
        -------
        IView
            A diagnostic view, call :meth:`ColorTranslationDiagnostic.plot` to 
            see the diagnostic plots
        """

        v = ColorTranslationDiagnostic(op=self)
        v.trait_set(**kwargs)
        return v
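
# Sketch of the default (log-log) translation model fit in estimate() above:
# regress log10(to) = x0 * log10(from) + x1 by least squares, then translate
# with trans_fn(d) = 10**x1 * d**x0.  The data below are synthetic; the true
# parameters (slope 0.95, scale 3.0) are recovered approximately.

import numpy as np
import scipy.optimize

rng = np.random.default_rng(0)
from_ch = 10 ** rng.uniform(1, 4, 500)                  # fake "from" channel data
to_ch = 3.0 * from_ch ** 0.95 * 10 ** rng.normal(0, 0.01, 500)
lf, lt = np.log10(from_ch), np.log10(to_ch)
f = lambda x: lt - x[0] * lf - x[1]                     # residuals (weights == 1)
opt = scipy.optimize.least_squares(f, [1, 0])
trans_fn = lambda d, x=opt.x: (10 ** x[1]) * np.power(d, x[0])
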
class MATS2DMplCSDEEQ(MATS2DEval):

    #---------------------------------------
    # Tangential constitutive law parameters
    #---------------------------------------
    gamma_T = Float(5000.,
                    label="Gamma",
                    desc=" Tangential Kinematic hardening modulus",
                    enter_set=True,
                    auto_set=False)

    K_T = Float(10.0,
                label="K",
                desc="Tangential Isotropic harening",
                enter_set=True,
                auto_set=False)

    S_T = Float(0.00001,
                label="S",
                desc="Damage strength",
                enter_set=True,
                auto_set=False)

    r_T = Float(1.2,
                label="r",
                desc="Damage cumulation parameter",
                enter_set=True,
                auto_set=False)

    c_T = Float(1.2,
                label="c",
                desc="Damage cumulation parameter",
                enter_set=True,
                auto_set=False)

    tau_pi_bar = Float(2.0,
                       label="Tau_bar",
                       desc="Reversibility limit",
                       enter_set=True,
                       auto_set=False)

    a = Float(0.0,
              label="a",
              desc="Lateral pressure coefficient",
              enter_set=True,
              auto_set=False)

    #-------------------------------------------
    # Normal_Tension constitutive law parameters (without cumulative normal strain)
    #-------------------------------------------
    Ad = Float(10000.0,
               label="a",
               desc="brittleness coefficient",
               enter_set=True,
               auto_set=False)

    eps_0 = Float(0.0002,
                  label="a",
                  desc="threshold strain",
                  enter_set=True,
                  auto_set=False)

    #-----------------------------------------------
    # Normal_Compression constitutive law parameters
    #-----------------------------------------------
    K_N = Float(10000.,
                label="K_N",
                desc=" Normal isotropic harening",
                enter_set=True,
                auto_set=False)

    gamma_N = Float(15000.,
                    label="gamma_N",
                    desc="Normal kinematic hardening",
                    enter_set=True,
                    auto_set=False)

    sigma_0 = Float(20.,
                    label="sigma_0",
                    desc="Yielding stress",
                    enter_set=True,
                    auto_set=False)

    state_var_shapes = Property(Dict(), depends_on='n_mp')
    '''Dictionary of state variable entries with their array shapes.
    '''
    #-------------------------------------------------------------------------
    # Cached elasticity tensors
    #-------------------------------------------------------------------------

    E = tr.Float(34e+3,
                 label="E",
                 desc="Young's Modulus",
                 auto_set=False,
                 input=True)

    nu = tr.Float(0.2,
                  label='nu',
                  desc="Poison ratio",
                  auto_set=False,
                  input=True)

    def _get_lame_params(self):
        la = self.E * self.nu / ((1. + self.nu) * (1. - 2. * self.nu))
        # second Lame parameter (shear modulus)
        mu = self.E / (2. + 2. * self.nu)
        return la, mu

    D_abef = tr.Property(tr.Array, depends_on='+input')

    @tr.cached_property
    def _get_D_abef(self):
        la = self._get_lame_params()[0]
        mu = self._get_lame_params()[1]
        delta = np.identity(2)
        D_abef = (np.einsum(',ij,kl->ijkl', la, delta, delta) +
                  np.einsum(',ik,jl->ijkl', mu, delta, delta) +
                  np.einsum(',il,jk->ijkl', mu, delta, delta))

        return D_abef

    @cached_property
    def _get_state_var_shapes(self):
        return {
            'w_N_Emn': (self.n_mp, ),
            'z_N_Emn': (self.n_mp, ),
            'alpha_N_Emn': (self.n_mp, ),
            'r_N_Emn': (self.n_mp, ),
            'eps_N_p_Emn': (self.n_mp, ),
            'sigma_N_Emn': (self.n_mp, ),
            'w_T_Emn': (self.n_mp, ),
            'z_T_Emn': (self.n_mp, ),
            'alpha_T_Emna': (self.n_mp, 2),
            'eps_T_pi_Emna': (self.n_mp, 2),
        }

    #--------------------------------------------------------------
    # microplane constitutive law (normal behavior CP + TD)
    # (without cumulative normal strain for fatigue under tension)
    #--------------------------------------------------------------
    def get_normal_law(self, eps_N_Emn, w_N_Emn, z_N_Emn, alpha_N_Emn, r_N_Emn,
                       eps_N_p_Emn):

        E_N = self.E / (1.0 - 2.0 * self.nu)

        pos = eps_N_Emn > 1e-6
        H = 1.0 * pos

        sigma_n_trial = (1.0 - H * w_N_Emn) * E_N * (eps_N_Emn - eps_N_p_Emn)
        Z = self.K_N * r_N_Emn
        X = self.gamma_N * alpha_N_Emn
        h = self.sigma_0 + Z
        pos_iso = h > 1e-6
        f_trial = abs(sigma_n_trial - X) - h * pos_iso

        thres_1 = f_trial > 1e-6

        delta_lamda = f_trial / \
            (E_N + abs(self.K_N) + self.gamma_N) * thres_1
        eps_N_p_Emn = eps_N_p_Emn + delta_lamda * np.sign(sigma_n_trial - X)
        r_N_Emn = r_N_Emn + delta_lamda
        alpha_N_Emn = alpha_N_Emn + delta_lamda * np.sign(sigma_n_trial - X)

        def Z_N(z_N_Emn):
            return 1.0 / self.Ad * (-z_N_Emn) / (1.0 + z_N_Emn)

        Y_N = 0.5 * H * E_N * eps_N_Emn**2.0
        Y_0 = 0.5 * E_N * self.eps_0**2.0
        f = Y_N - (Y_0 + Z_N(z_N_Emn))

        thres_2 = f > 1e-6

        def f_w(Y):
            return 1.0 - 1.0 / (1.0 + self.Ad * (Y - Y_0))

        w_N_Emn = f_w(Y_N) * thres_2
        z_N_Emn = -w_N_Emn * thres_2

        sigma_N_Emn = (1.0 - H * w_N_Emn) * E_N * (eps_N_Emn - eps_N_p_Emn)

        return w_N_Emn, z_N_Emn, alpha_N_Emn, r_N_Emn, eps_N_p_Emn, sigma_N_Emn

    #-------------------------------------------------------------------------
    # microplane constitutive law (Tangential CSD)-(Pressure sensitive cumulative damage)
    #-------------------------------------------------------------------------
    def get_tangential_law(self, eps_T_Emna, w_T_Emn, z_T_Emn, alpha_T_Emna,
                           eps_T_pi_Emna, sigma_N_Emn):

        E_T = self.E / (1.0 + self.nu)

        sig_pi_trial = E_T * (eps_T_Emna - eps_T_pi_Emna)
        Z = self.K_T * z_T_Emn
        X = self.gamma_T * alpha_T_Emna
        norm_1 = np.sqrt(
            np.einsum('...na,...na->...n', (sig_pi_trial - X),
                      (sig_pi_trial - X)))

        f = norm_1 - self.tau_pi_bar - \
            Z + self.a * sigma_N_Emn / 3.0

        plas_1 = f > 1e-6
        elas_1 = f < 1e-6

        delta_lamda = f / \
            (E_T / (1.0 - w_T_Emn) + self.gamma_T + self.K_T) * plas_1

        norm_2 = 1.0 * elas_1 + np.sqrt(
            np.einsum('...na,...na->...n', (sig_pi_trial - X),
                      (sig_pi_trial - X))) * plas_1

        eps_T_pi_Emna[..., 0] = eps_T_pi_Emna[..., 0] + plas_1 * delta_lamda * \
            ((sig_pi_trial[..., 0] - X[..., 0]) /
             (1.0 - w_T_Emn)) / norm_2
        eps_T_pi_Emna[..., 1] = eps_T_pi_Emna[..., 1] + plas_1 * delta_lamda * \
            ((sig_pi_trial[..., 1] - X[..., 1]) /
             (1.0 - w_T_Emn)) / norm_2

        Y = 0.5 * E_T * \
            np.einsum(
                '...na,...na->...n',
                (eps_T_Emna - eps_T_pi_Emna),
                (eps_T_Emna - eps_T_pi_Emna)
            )

        w_T_Emn += ((1 - w_T_Emn) ** self.c_T) * \
            (delta_lamda * (Y / self.S_T) ** self.r_T) * \
            (self.tau_pi_bar / (self.tau_pi_bar - self.a * sigma_N_Emn / 3.0))

        alpha_T_Emna[..., 0] = alpha_T_Emna[..., 0] + plas_1 * delta_lamda * \
            (sig_pi_trial[..., 0] - X[..., 0]) / norm_2
        alpha_T_Emna[..., 1] = alpha_T_Emna[..., 1] + plas_1 * delta_lamda * \
            (sig_pi_trial[..., 1] - X[..., 1]) / norm_2

        z_T_Emn = z_T_Emn + delta_lamda

        return w_T_Emn, z_T_Emn, alpha_T_Emna, eps_T_pi_Emna
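
    # Summary of the tangential law above: a trial state with f > 0 triggers a
    # plastic corrector with
    #     delta_lamda = f / (E_T / (1 - w_T) + gamma_T + K_T),
    # the sliding strain and back-stress evolve along the direction
    # (sig_pi_trial - X) / ||sig_pi_trial - X||, and the damage grows as
    #     dw_T = (1 - w_T)**c_T * delta_lamda * (Y / S_T)**r_T
    #            * tau_pi_bar / (tau_pi_bar - a * sigma_N / 3).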

#     #-------------------------------------------------------------------------
#     # MICROPLANE-Kinematic constraints
#     #-------------------------------------------------------------------------
#     def _get_e_Emna(self, eps_Emab):
#         # Projection of apparent strain onto the individual microplanes
#         e_ni = np.einsum('nb,Emba->Emna', self._MPN, eps_Emab)
#         return e_ni
#
#     def _get_e_N_Emn(self, e_Emna):
#         # get the normal strain array for each microplane
#
#         e_N_Emn = np.einsum('nij,...ij->...n', e_Emna, self._MPN)
#         return e_N_Emn
#
#     def _get_e_T_Emna(self, e_Emna):
#         # get the tangential strain vector array for each microplane
#         MPTT_ijr = self._get__MPTT()
#         return np.einsum('nija,...ij->...na', MPTT_ijr, e_Emna)

    #-------------------------------------------------
    # Alternative methods for the kinematic constraint
    #-------------------------------------------------

    # get the operator of the microplane normals
    _MPNN = Property(depends_on='n_mp')

    @cached_property
    def _get__MPNN(self):
        MPNN_nij = np.einsum('ni,nj->nij', self._MPN, self._MPN)
        return MPNN_nij

    # get the third order tangential tensor (operator) for each microplane
    _MPTT = Property(depends_on='n_mp')

    @cached_property
    def _get__MPTT(self):
        delta = np.identity(2)
        MPTT_nijr = 0.5 * (
            np.einsum('ni,jr -> nijr', self._MPN, delta) +
            np.einsum('nj,ir -> njir', self._MPN, delta) -
            2 * np.einsum('ni,nj,nr -> nijr', self._MPN, self._MPN, self._MPN))
        return MPTT_nijr
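
    # Note: contracting MPTT_nijr with a symmetric strain tensor gives the
    # tangential strain vector e_T_r = n_i * eps_ir - (n_i * n_j * eps_ij) * n_r,
    # which is orthogonal to the microplane normal (n_r * e_T_r == 0).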

    def _get_e_N_Emn_2(self, eps_Emab):
        # get the normal strain array for each microplane
        return np.einsum('nij,...ij->...n', self._MPNN, eps_Emab)

    def _get_e_T_Emnar_2(self, eps_Emab):
        # get the tangential strain vector array for each microplane
        MPTT_ijr = self._get__MPTT()
        return np.einsum('nija,...ij->...na', MPTT_ijr, eps_Emab)

    #--------------------------------------------------------
    # return the state variables (Damage , inelastic strains)
    #--------------------------------------------------------
    def _get_state_variables(self, eps_Emab, tn1, omegaN, z_N_Emn, alpha_N_Emn,
                             r_N_Emn, eps_N_p_Emn, sigma_N_Emn, w_T_Emn,
                             z_T_Emn, alpha_T_Emna, eps_T_pi_Emna):

        e_N_arr = self._get_e_N_Emn_2(eps_Emab)
        e_T_vct_arr = self._get_e_T_Emnar_2(eps_Emab)

        omegaN, z_N_Emn, alpha_N_Emn, r_N_Emn, eps_N_p_Emn, sigma_N_Emn = self.get_normal_law(
            e_N_arr, omegaN, z_N_Emn, alpha_N_Emn, r_N_Emn, eps_N_p_Emn)

        w_T_Emn, z_T_Emn, alpha_T_Emna, eps_T_pi_Emna = self.get_tangential_law(
            e_T_vct_arr, w_T_Emn, z_T_Emn, alpha_T_Emna, eps_T_pi_Emna,
            sigma_N_Emn)

        return omegaN, z_N_Emn, alpha_N_Emn, r_N_Emn, eps_N_p_Emn, sigma_N_Emn, w_T_Emn, z_T_Emn, alpha_T_Emna, eps_T_pi_Emna

    #-----------------------------------------------------------------
    # Returns a list of the plastic normal strain  for all microplanes.
    #-----------------------------------------------------------------
    def _get_eps_N_p_Emn(self, eps_Emab, w_N_Emn, z_N_Emn, alpha_N_Emn,
                         r_N_Emn, eps_N_p_Emn, sigma_N_Emn):

        eps_N_Emn = self._get_e_N_Emn_2(eps_Emab)

        eps_N_p_Emn = self.get_normal_law(eps_N_Emn, w_N_Emn, z_N_Emn,
                                          alpha_N_Emn, r_N_Emn,
                                          eps_N_p_Emn)[4]

        return eps_N_p_Emn

    #----------------------------------------------------------------
    # Returns a list of the sliding strain vector for all microplanes.
    #----------------------------------------------------------------
    def _get_eps_T_pi_arr(self, eps_Emab, w_T_Emn, z_T_Emn, alpha_T_Emna,
                          eps_T_pi_Emna, sigma_N_Emn):

        eps_T_Emna = self._get_e_T_Emnar_2(eps_Emab)

        eps_T_pi_Emna = self.get_tangential_law(eps_T_Emna, w_T_Emn, z_T_Emn,
                                                alpha_T_Emna, eps_T_pi_Emna,
                                                sigma_N_Emn)[3]

        return eps_T_pi_Emna

    #-------------------------------------------------------------
    # Returns a list of the integrity factors for all microplanes.
    #-------------------------------------------------------------
    def _get_phi_Emn(self, eps_Emab, w_N_Emn, z_N_Emn, alpha_N_Emn, r_N_Emn,
                     eps_N_p_Emn, w_T_Emn, z_T_Emn, alpha_T_Emna,
                     eps_T_pi_Emna, sigma_N_Emn):

        eps_N_Emn = self._get_e_N_Emn_2(eps_Emab)
        eps_T_Emna = self._get_e_T_Emnar_2(eps_Emab)

        w_N_Emn = self.get_normal_law(eps_N_Emn, w_N_Emn, z_N_Emn, alpha_N_Emn,
                                      r_N_Emn, eps_N_p_Emn)[0]

        w_T_Emn = self.get_tangential_law(eps_T_Emna, w_T_Emn, z_T_Emn,
                                          alpha_T_Emna, eps_T_pi_Emna,
                                          sigma_N_Emn)[0]

        # damage on each microplane is governed here by the tangential law;
        # an alternative is to take the maximum of both mechanisms:
        # w_Emn = np.maximum(w_N_Emn, w_T_Emn)
        w_Emn = w_T_Emn

        phi_Emn = np.sqrt(1.0 - w_Emn)

        return phi_Emn

    #----------------------------------------------
    # Returns the 2nd order damage tensor 'phi_mtx'
    #----------------------------------------------
    def _get_phi_Emab(self, eps_Emab, w_N_Emn, z_N_Emn, alpha_N_Emn, r_N_Emn,
                      eps_N_p_Emn, w_T_Emn, z_T_Emn, alpha_T_Emna,
                      eps_T_pi_Emna, sigma_N_Emn):

        # scalar integrity factor for each microplane
        phi_Emn = self._get_phi_Emn(eps_Emab, w_N_Emn, z_N_Emn, alpha_N_Emn,
                                    r_N_Emn, eps_N_p_Emn, w_T_Emn, z_T_Emn,
                                    alpha_T_Emna, eps_T_pi_Emna, sigma_N_Emn)

        # integration terms for each microplanes
        phi_Emab = np.einsum('...n,n,nab->...ab', phi_Emn, self._MPW,
                             self._MPNN)

        return phi_Emab

    #----------------------------------------------------------------------
    # Returns the 4th order damage tensor 'beta4' using sum-type symmetrization
    # (cf. [Jir99], Eq.(21))
    #----------------------------------------------------------------------
    def _get_beta_Emabcd(self, eps_Emab, w_N_Emn, z_N_Emn, alpha_N_Emn,
                         r_N_Emn, eps_N_p_Emn, w_T_Emn, z_T_Emn, alpha_T_Emna,
                         eps_T_pi_Emna, sigma_N_Emn):

        delta = np.identity(2)

        phi_Emab = self._get_phi_Emab(eps_Emab, w_N_Emn, z_N_Emn, alpha_N_Emn,
                                      r_N_Emn, eps_N_p_Emn, w_T_Emn, z_T_Emn,
                                      alpha_T_Emna, eps_T_pi_Emna, sigma_N_Emn)

        # use numpy functionality (einsum) to evaluate [Jir99], Eq.(21)
        beta_Emabcd = 0.25 * (np.einsum('...ik,jl->...ijkl', phi_Emab, delta) +
                              np.einsum('...il,jk->...ijkl', phi_Emab, delta) +
                              np.einsum('...jk,il->...ijkl', phi_Emab, delta) +
                              np.einsum('...jl,ik->...ijkl', phi_Emab, delta))

        return beta_Emabcd

    #---------------------------------------------------------------------
    # Extra homogenization of damage tensor in case of two damage parameters
    # Returns the 4th order damage tensor 'beta4' using (ref. [Baz99], Eq.(63))
    #---------------------------------------------------------------------

    def _get_beta_Emabcd_2(self, eps_Emab, w_N_Emn, z_N_Emn, alpha_N_Emn,
                           r_N_Emn, eps_N_p_Emn, w_T_Emn, z_T_Emn,
                           alpha_T_Emna, eps_T_pi_Emna, sigma_N_Emn):

        # Returns the 4th order damage tensor 'beta4' using
        #(cf. [Baz99], Eq.(63))

        eps_N_Emn = self._get_e_N_Emn_2(eps_Emab)
        eps_T_Emna = self._get_e_T_Emnar_2(eps_Emab)

        w_N_Emn = self.get_normal_law(eps_N_Emn, w_N_Emn, z_N_Emn, alpha_N_Emn,
                                      r_N_Emn, eps_N_p_Emn)[0]

        w_T_Emn = self.get_tangential_law(eps_T_Emna, w_T_Emn, z_T_Emn,
                                          alpha_T_Emna, eps_T_pi_Emna,
                                          sigma_N_Emn)[0]

        delta = np.identity(2)
        beta_N = np.sqrt(1. - w_N_Emn)
        beta_T = np.sqrt(1. - w_T_Emn)

        # alternative, linear integrity factors:
        # beta_N = 1. - w_N_Emn
        # beta_T = 1. - w_T_Emn

        beta_ijkl = np.einsum('n, ...n,ni, nj, nk, nl -> ...ijkl', self._MPW, beta_N, self._MPN, self._MPN, self._MPN, self._MPN) + \
            0.25 * (np.einsum('n, ...n,ni, nk, jl -> ...ijkl', self._MPW, beta_T, self._MPN, self._MPN, delta) +
                    np.einsum('n, ...n,ni, nl, jk -> ...ijkl', self._MPW, beta_T, self._MPN, self._MPN, delta) +
                    np.einsum('n, ...n,nj, nk, il -> ...ijkl', self._MPW, beta_T, self._MPN, self._MPN, delta) +
                    np.einsum('n, ...n,nj, nl, ik -> ...ijkl', self._MPW, beta_T, self._MPN, self._MPN, delta) -
                    4.0 * np.einsum('n, ...n, ni, nj, nk, nl -> ...ijkl', self._MPW, beta_T, self._MPN, self._MPN, self._MPN, self._MPN))

        return beta_ijkl

    #-----------------------------------------------------------
    # Integration of the (inelastic) strains for each microplane
    #-----------------------------------------------------------

    def _get_eps_p_Emab(self, eps_Emab, w_N_Emn, z_N_Emn, alpha_N_Emn, r_N_Emn,
                        eps_N_p_Emn, w_T_Emn, z_T_Emn, alpha_T_Emna,
                        eps_T_pi_Emna, sigma_N_Emn):

        eps_N_Emn = self._get_e_N_Emn_2(eps_Emab)
        eps_T_Emna = self._get_e_T_Emnar_2(eps_Emab)

        # plastic normal strains
        eps_N_p_Emn = self.get_normal_law(eps_N_Emn, w_N_Emn, z_N_Emn,
                                          alpha_N_Emn, r_N_Emn, eps_N_p_Emn)[4]

        # sliding tangential strains
        eps_T_pi_Emna = self.get_tangential_law(eps_T_Emna, w_T_Emn, z_T_Emn,
                                                alpha_T_Emna, eps_T_pi_Emna,
                                                sigma_N_Emn)[3]

        delta = np.identity(2)

        # 2-nd order plastic (inelastic) tensor
        eps_p_Emab = (np.einsum('n,...n,na,nb->...ab', self._MPW, eps_N_p_Emn,
                                self._MPN, self._MPN) + 0.5 *
                      (np.einsum('n,...nf,na,fb->...ab', self._MPW,
                                 eps_T_pi_Emna, self._MPN, delta) +
                       np.einsum('n,...nf,nb,fa->...ab', self._MPW,
                                 eps_T_pi_Emna, self._MPN, delta)))

        return eps_p_Emab

    #-------------------------------------------------------------------------
    # Evaluation - get the corrector and predictor
    #-------------------------------------------------------------------------

    def get_corr_pred(self, eps_Emab, t_n1, w_N_Emn, z_N_Emn, alpha_N_Emn,
                      r_N_Emn, eps_N_p_Emn, sigma_N_Emn, w_T_Emn, z_T_Emn,
                      alpha_T_Emna, eps_T_pi_Emna):

        # Corrector predictor computation.

        #------------------------------------------------------------------
        # Damage tensor (4th order) using product- or sum-type symmetrization:
        #------------------------------------------------------------------
        beta_Emabcd = self._get_beta_Emabcd(eps_Emab, w_N_Emn, z_N_Emn,
                                            alpha_N_Emn, r_N_Emn, eps_N_p_Emn,
                                            w_T_Emn, z_T_Emn, alpha_T_Emna,
                                            eps_T_pi_Emna, sigma_N_Emn)

        #------------------------------------------------------------------
        # Damaged stiffness tensor calculated based on the damage tensor beta4:
        #------------------------------------------------------------------

        D_Emabcd = np.einsum('...ijab, abef, ...cdef->...ijcd', beta_Emabcd,
                             self.D_abef, beta_Emabcd)
        #----------------------------------------------------------------------
        # Return stresses (corrector) and damaged secant stiffness matrix (predictor)
        #----------------------------------------------------------------------
        # plastic strain tensor
        eps_p_Emab = self._get_eps_p_Emab(eps_Emab, w_N_Emn, z_N_Emn,
                                          alpha_N_Emn, r_N_Emn, eps_N_p_Emn,
                                          w_T_Emn, z_T_Emn, alpha_T_Emna,
                                          eps_T_pi_Emna, sigma_N_Emn)

        # elastic strain tensor
        eps_e_Emab = eps_Emab - eps_p_Emab

        # calculation of the stress tensor
        sig_Emab = np.einsum('...abcd,...cd->...ab', D_Emabcd, eps_e_Emab)

        return D_Emabcd, sig_Emab

    # class MATS2DMplCSDEEQ(MATSXDMplCDSEEQ, MATS2DEval):
    #     implements(IMATSEval)

    #-----------------------------------------------
    # number of microplanes
    #-----------------------------------------------
    n_mp = Constant(360)

    #-----------------------------------------------
    # get the normal vectors of the microplanes
    #-----------------------------------------------
    _MPN = Property(depends_on='n_mp')

    @cached_property
    def _get__MPN(self):
        # microplane normals:
        alpha_list = np.linspace(0, 2 * np.pi, self.n_mp)

        MPN = np.array([[np.cos(alpha), np.sin(alpha)]
                        for alpha in alpha_list])

        return MPN

    #-------------------------------------
    # get the weights of the microplanes
    #-------------------------------------
    _MPW = Property(depends_on='n_mp')

    @cached_property
    def _get__MPW(self):
        MPW = np.ones(self.n_mp) / self.n_mp * 2

        return MPW
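
# Quick check of the 2D microplane discretization above (a standalone sketch):
# n_mp unit normals around the circle with equal weights summing to 2.  Note
# that np.linspace includes both endpoints, so the first and last normals
# coincide (alpha = 0 and alpha = 2*pi).

import numpy as np

n_mp = 360
alpha_list = np.linspace(0, 2 * np.pi, n_mp)
MPN = np.array([[np.cos(alpha), np.sin(alpha)] for alpha in alpha_list])
MPW = np.ones(n_mp) / n_mp * 2
assert np.allclose(np.linalg.norm(MPN, axis=1), 1.0)  # unit normals
assert np.isclose(MPW.sum(), 2.0)                     # weights sum to 2
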
class FlowPeaksOp(HasStrictTraits):
    """
    This module uses the **flowPeaks** algorithm to assign events to clusters in
    an unsupervised manner.
    
    Call :meth:`estimate` to compute the clusters.
      
    Calling :meth:`apply` creates a new categorical metadata variable 
    named ``name``, with possible values ``name_1`` ... ``name_n`` where 
    ``n`` is the number of clusters estimated.
    
    The same model may not be appropriate for different subsets of the data set.
    If this is the case, you can use the :attr:`by` attribute to specify 
    metadata by which to aggregate the data before estimating (and applying) 
    a model.  The number of clusters is a model parameter and it may vary in 
    each subset. 

    Attributes
    ----------
    name : Str
        The operation name; determines the name of the new metadata column
        
    channels : List(Str)
        The channels to apply the clustering algorithm to.

    scale : Dict(Str : Enum("linear", "logicle", "log"))
        Re-scale the data in the specified channels before fitting.  If a 
        channel is in :attr:`channels` but not in :attr:`scale`, the current 
        package-wide default (set with :func:`set_default_scale`) is used.
    
    by : List(Str)
        A list of metadata attributes to aggregate the data before estimating
        the model.  For example, if the experiment has two pieces of metadata,
        ``Time`` and ``Dox``, setting ``by = ["Time", "Dox"]`` will fit the model 
        separately to each subset of the data with a unique combination of
        ``Time`` and ``Dox``.
        
    h : Float (default = 1.5)
        A scalar value by which to scale the covariance matrices of the 
        underlying density function.  (See ``Notes``, below, for more details.)
        
    h0 : Float (default = 1.0)
        A scalar value by which to smooth the covariance matrices of the
        underlying density function.  (See ``Notes``, below, for more details.)
        
    tol : Float (default = 0.5)
        How readily should clusters be merged?  Must be between 0 and 1.
        See ``Notes``, below, for more details.
        
    merge_dist : Float (default = 5)
        How far apart can clusters be before they are merged?  This is
        a unit-free scalar, and is approximately the maximum number of
        k-means clusters between peaks. 
        
    find_outliers : Bool (default = False)
        Should the algorithm use an extra step to identify outliers?
        
        .. note::
            I have disabled this code until I can try to make it faster.
        
    Notes
    -----
    
    This algorithm uses kmeans to find a large number of clusters, then 
    hierarchically merges those clusters.  Thus, the user does not need to
    specify the number of clusters in advance, and it can find non-convex
    clusters.  It also operates in an arbitrary number of dimensions.
    
    The merging happens in two steps.  First, the cluster centroids are used
    to estimate an underlying density function.  Then, the local maxima of
    the density function are found using a numerical optimization starting from
    each centroid, and k-means clusters that converge to the same local maximum
    are merged.  Finally, these clusters-of-clusters are merged if their local 
    maxima are (a) close enough, and (b) the density function between them is 
    smooth enough.  Thus, the final assignment of each event depends on the 
    k-means cluster it ends up in, and which cluster-of-clusters that k-means 
    centroid is assigned to.
    
    There are a lot of parameters that affect this process.  The k-means
    clustering is pretty robust (though somewhat sensitive to the number of
    clusters, which is currently not exposed in the API.)  The most important
    parameters are exposed as attributes of the :class:`FlowPeaksOp` class.
    These include:

    - :attr:`h`, :attr:`h0`: sometimes the density function is too "rough" to
      find good local maxima.  These parameters smooth it out by widening the
      covariance matrices.  Increasing :attr:`h` or :attr:`h0` makes the
      density smoother.

    - :attr:`tol`: how smooth does the density function have to be between two
      density maxima to merge them?  Must be between 0 and 1.

    - :attr:`merge_dist`: how close must two maxima be to merge them?  This
      value is a unit-free scalar, and is approximately the number of
      k-means clusters between the two maxima.
        
    For details and a theoretical justification, see [1]_.
    
    References
    ----------
    
    .. [1] Ge, Yongchao and Sealfon, Stuart C.  "flowPeaks: a fast unsupervised
       clustering for flow cytometry data via K-means and density peak finding" 
       Bioinformatics (2012) 28 (15): 2052-2058.         
  
    Examples
    --------
    
    .. plot::
        :context: close-figs
        
        Make a little data set.
    
        >>> import cytoflow as flow
        >>> import_op = flow.ImportOp()
        >>> import_op.tubes = [flow.Tube(file = "Plate01/RFP_Well_A3.fcs",
        ...                              conditions = {'Dox' : 10.0}),
        ...                    flow.Tube(file = "Plate01/CFP_Well_A4.fcs",
        ...                              conditions = {'Dox' : 1.0})]
        >>> import_op.conditions = {'Dox' : 'float'}
        >>> ex = import_op.apply()
    
    Create and parameterize the operation.
    
    .. plot::
        :context: close-figs
        
        >>> fp_op = flow.FlowPeaksOp(name = 'Flow',
        ...                          channels = ['V2-A', 'Y2-A'],
        ...                          scale = {'V2-A' : 'log',
        ...                                   'Y2-A' : 'log'},
        ...                          h0 = 3)
        
    Estimate the clusters
    
    .. plot::
        :context: close-figs
        
        >>> fp_op.estimate(ex)
        
    Plot a diagnostic view of the underlying density
    
    .. plot::
        :context: close-figs
        
        >>> fp_op.default_view(density = True).plot(ex)

    Apply the gate
    
    .. plot::
        :context: close-figs
        
        >>> ex2 = fp_op.apply(ex)

    Plot a diagnostic view with the event assignments
    
    .. plot::
        :context: close-figs
        
        >>> fp_op.default_view().plot(ex2)
        

    """

    id = Constant('edu.mit.synbio.cytoflow.operations.flowpeaks')
    friendly_id = Constant("FlowPeaks Clustering")

    name = CStr()
    channels = List(Str)
    scale = Dict(Str, util.ScaleEnum)
    by = List(Str)
    #     find_outliers = Bool(False)

    # parameters that control estimation, with sensible defaults
    h = util.PositiveFloat(1.5, allow_zero=False)
    h0 = util.PositiveFloat(1, allow_zero=False)
    tol = util.PositiveFloat(0.5, allow_zero=False)
    merge_dist = util.PositiveFloat(5, allow_zero=False)

    # parameters that control outlier selection, with sensible defaults

    _kmeans = Dict(Any,
                   Instance(sklearn.cluster.MiniBatchKMeans),
                   transient=True)
    _means = Dict(Any, List, transient=True)
    _normals = Dict(Any, List(Function), transient=True)
    _density = Dict(Any, Function, transient=True)
    _peaks = Dict(Any, List(Array), transient=True)
    _peak_clusters = Dict(Any, List(Array), transient=True)
    _cluster_peak = Dict(Any, List,
                         transient=True)  # kmeans cluster idx --> peak idx
    _cluster_group = Dict(Any, List,
                          transient=True)  # kmeans cluster idx --> group idx
    _scale = Dict(Str, Instance(util.IScale), transient=True)

    def estimate(self, experiment, subset=None):
        """
        Estimate the k-means clusters, then hierarchically merge them.
        
        Parameters
        ----------
        experiment : Experiment
            The :class:`.Experiment` to use to estimate the k-means clusters
            
        subset : str (default = None)
            A Python expression that specifies a subset of the data in 
            ``experiment`` to use to parameterize the operation.
        """

        if experiment is None:
            raise util.CytoflowOpError('experiment', "No experiment specified")

        if len(self.channels) == 0:
            raise util.CytoflowOpError('channels',
                                       "Must set at least one channel")

        for c in self.channels:
            if c not in experiment.data:
                raise util.CytoflowOpError(
                    'channels',
                    "Channel {0} not found in the experiment".format(c))

        for c in self.scale:
            if c not in self.channels:
                raise util.CytoflowOpError(
                    'scale', "Scale set for channel {0}, but it isn't "
                    "in the experiment".format(c))

        for b in self.by:
            if b not in experiment.conditions:
                raise util.CytoflowOpError(
                    'by', "Aggregation metadata {} not found, "
                    "must be one of {}".format(b, experiment.conditions))

        if subset:
            try:
                experiment = experiment.query(subset)
            except:
                raise util.CytoflowOpError(
                    'subset', "Subset string '{0}' isn't valid".format(subset))

            if len(experiment) == 0:
                raise util.CytoflowOpError(
                    'subset',
                    "Subset string '{0}' returned no events".format(subset))

        if self.by:
            groupby = experiment.data.groupby(self.by)
        else:
            # use a lambda expression to return a group that contains
            # all the events
            groupby = experiment.data.groupby(lambda _: True)

        # get the scale. estimate the scale params for the ENTIRE data set,
        # not subsets we get from groupby().  And we need to save it so that
        # the data is transformed the same way when we apply()
        for c in self.channels:
            if c in self.scale:
                self._scale[c] = util.scale_factory(self.scale[c],
                                                    experiment,
                                                    channel=c)
#                 if self.scale[c] == 'log':
#                     self._scale[c].mode = 'mask'
            else:
                self._scale[c] = util.scale_factory(util.get_default_scale(),
                                                    experiment,
                                                    channel=c)

        for data_group, data_subset in groupby:
            if len(data_subset) == 0:
                raise util.CytoflowOpError(
                    'by', "Group {} had no data".format(data_group))
            x = data_subset.loc[:, self.channels[:]]
            for c in self.channels:
                x[c] = self._scale[c](x[c])

            # drop data that isn't in the scale range
            for c in self.channels:
                x = x[~(np.isnan(x[c]))]
            x = x.values

            #### choose the number of clusters and fit the kmeans
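            # heuristic: one k-means cluster per histogram bin, using the
            # median of the per-channel bin counts from util.num_hist_bins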
            num_clusters = [
                util.num_hist_bins(x[:, c]) for c in range(len(self.channels))
            ]
            num_clusters = np.ceil(np.median(num_clusters))
            num_clusters = int(num_clusters)

            self._kmeans[data_group] = kmeans = \
                sklearn.cluster.MiniBatchKMeans(n_clusters = num_clusters,
                                                random_state = 0)

            kmeans.fit(x)
            x_labels = kmeans.predict(x)
            d = len(self.channels)

            #### use the kmeans centroids to parameterize a finite gaussian
            #### mixture model which estimates the density function

            s0 = np.zeros([d, d])
            for j in range(d):
                r = x[:, j].max() - x[:, j].min()
                s0[j, j] = (r / (num_clusters**(1. / d)))**0.5

            means = []
            weights = []
            normals = []

            for k in range(num_clusters):
                xk = x[x_labels == k]
                num_k = np.sum(x_labels == k)
                weight_k = num_k / len(x_labels)
                mu = xk.mean(axis=0)
                means.append(mu)
                s = np.cov(xk, rowvar=False)

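                # shrink the cluster's own covariance s (scaled by h) toward
                # the global smoothing matrix s0 (scaled by h0); el -> 1 for
                # well-populated clusters and el -> 0 for sparse ones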
                el = num_k / (num_clusters + num_k)
                s_smooth = el * self.h * s + (1.0 - el) * self.h0 * s0

                n = scipy.stats.multivariate_normal(mean=mu, cov=s_smooth)
                weights.append(weight_k)
                normals.append(lambda x, n=n: n.pdf(x))

            self._means[data_group] = means
            self._normals[data_group] = normals
            density = lambda x, weights=weights, normals=normals: \
                np.sum([w * n(x) for w, n in zip(weights, normals)], axis=0)
            self._density[data_group] = density

        ### use optimization on the finite gmm to find the local peak for
        ### each kmeans cluster
        for data_group, data_subset in groupby:
            kmeans = self._kmeans[data_group]
            num_clusters = kmeans.n_clusters
            means = self._means[data_group]
            density = self._density[data_group]
            peaks = []
            peak_clusters = []  # peak idx --> list of clusters

            min_mu = [np.inf] * len(self.channels)
            max_mu = [-1.0 * np.inf] * len(self.channels)

            for k in range(num_clusters):
                mu = means[k]
                for ci in range(len(self.channels)):
                    if mu[ci] < min_mu[ci]:
                        min_mu[ci] = mu[ci]
                    if mu[ci] > max_mu[ci]:
                        max_mu[ci] = mu[ci]

            for k in range(num_clusters):
                mu = means[k]
                f = lambda x: -1.0 * density(x)

                res = scipy.optimize.minimize(f,
                                              mu,
                                              method="CG",
                                              options={'gtol': 1e-3})

                if not res.success:
                    warn(
                        "Peak finding failed for cluster {}: {}".format(
                            k, res.message), util.CytoflowWarning)


#                 ### The peak-searching algorithm from the paper.  works fine,
#                 ### but slow!  we get similar results with the conjugate gradient
#                 ### optimization method from scipy

#                 x0 = x = means[k]
#                 k0 = k
#                 b = beta_max[k] / 10.0
#                 Nsuc = 0
#                 n = 0
#
#                 while(n < 1000):
# #                     df = scipy.misc.derivative(density, x, 1e-6)
#                     df = statsmodels.tools.numdiff.approx_fprime(x, density)
#                     if np.linalg.norm(df) < 1e-3:
#                         break
#
#                     y = x + b * df / np.linalg.norm(df)
#                     if density(y) <= density(x):
#                         Nsuc = 0
#                         b = b / 2.0
#                         continue
#
#                     Nsuc += 1
#                     if Nsuc >= 2:
#                         b = min(2*b, beta_max[k])
#
#                     ky = kmeans.predict(y[np.newaxis, :])[0]
#                     if ky == k:
#                         x = y
#                     else:
#                         k = ky
#                         b = beta_max[k] / 10.0
#                         mu = means[k]
#                         if density(mu) > density(y):
#                             x = mu
#                         else:
#                             x = y
#
#                     n += 1

                merged = False
                for pi, p in enumerate(peaks):
                    # TODO - this probably only works for scaled measurements
                    if np.linalg.norm(p - res.x) < (1e-2):
                        peak_clusters[pi].append(k)
                        merged = True
                        break

                if not merged:
                    peak_clusters.append([k])
                    peaks.append(res.x)

            self._peaks[data_group] = peaks
            self._peak_clusters[data_group] = peak_clusters

            ### merge peaks that are sufficiently close

        for data_group, data_subset in groupby:
            kmeans = self._kmeans[data_group]
            num_clusters = kmeans.n_clusters
            means = self._means[data_group]
            density = self._density[data_group]
            peaks = self._peaks[data_group]
            peak_clusters = self._peak_clusters[data_group]

            groups = [[x] for x in range(len(peaks))]
            peak_groups = list(range(len(peaks)))  # peak idx --> group idx

            def max_tol(x, y):
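                # how smooth is the density along the segment from x to y?
                # compare the true density at points along the segment to a
                # straight-line interpolation between f(x) and f(y); the worst
                # relative deviation (found by the minimizer below) is the
                # score that can_merge() compares against self.tol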
                f = lambda a: density(a[np.newaxis, :])
                #                 lx = kmeans.predict(x[np.newaxis, :])[0]
                #                 ly = kmeans.predict(y[np.newaxis, :])[0]
                n = len(x)
                n_scale = 1

                #                 n_scale = np.sqrt(((nx + ny) / 2.0) / (n / num_clusters))

                def tol(t):
                    zt = x + t * (y - x)
                    fhat_zt = f(x) + t * (f(y) - f(x))
                    return -1.0 * abs((f(zt) - fhat_zt) / fhat_zt) * n_scale

                res = scipy.optimize.minimize_scalar(tol,
                                                     bounds=[0, 1],
                                                     method='Bounded')

                if res.status != 0:
                    raise util.CytoflowOpError(
                        None,
                        "tol optimization failed for {}, {}".format(x, y))
                return -1.0 * res.fun

            def nearest_neighbor_dist(k):
                min_dist = np.inf

                for i in range(num_clusters):
                    if i == k:
                        continue
                    dist = np.linalg.norm(means[k] - means[i])
                    if dist < min_dist:
                        min_dist = dist

                return min_dist

            sk = [nearest_neighbor_dist(x) for x in range(num_clusters)]

            def s(x):
                k = kmeans.predict(x[np.newaxis, :])[0]
                return sk[k]

            def can_merge(g, h):
                for pg in g:
                    for ph in h:
                        vg = peaks[pg]
                        vh = peaks[ph]
                        dist_gh = np.linalg.norm(vg - vh)

                        if max_tol(vg, vh) < self.tol and dist_gh / (
                                s(vg) + s(vh)) <= self.merge_dist:
                            return True

                return False

            while True:
                if len(groups) == 1:
                    break

                # find closest mergable groups
                min_dist = np.inf
                for gi in range(len(groups)):
                    g = groups[gi]

                    for hi in range(gi + 1, len(groups)):
                        h = groups[hi]

                        if can_merge(g, h):
                            dist_gh = np.inf
                            for pg in g:
                                vg = peaks[pg]
                                for ph in h:
                                    vh = peaks[ph]
                                    #                                     print("vg {} vh {}".format(vg, vh))
                                    dist_gh = min(dist_gh,
                                                  np.linalg.norm(vg - vh))

                            if dist_gh < min_dist:
                                min_gi = gi
                                min_hi = hi
                                min_dist = dist_gh

                if min_dist == np.inf:
                    break

                # merge the groups
                groups[min_gi].extend(groups[min_hi])
                for g in groups[min_hi]:
                    peak_groups[g] = min_gi
                del groups[min_hi]

            cluster_group = [0] * num_clusters
            cluster_peaks = [0] * num_clusters

            for gi, g in enumerate(groups):
                for p in g:
                    for cluster in peak_clusters[p]:
                        cluster_group[cluster] = gi
                        cluster_peaks[cluster] = p

            self._cluster_peak[data_group] = cluster_peaks
            self._cluster_group[data_group] = cluster_group

    def apply(self, experiment):
        """
        Assign events to a cluster.
        
        Assigns each event to one of the k-means centroids from :meth:`estimate`,
        then groups together events in the same cluster hierarchy.
        
        Parameters
        ----------
        experiment : Experiment
            the :class:`.Experiment` to apply the gate to.
            
        Returns
        -------
        Experiment
            A new :class:`.Experiment` with the gate applied to it.  
            TODO - document the extra statistics
        """

        if experiment is None:
            raise util.CytoflowOpError('experiment', "No experiment specified")

        # make sure name got set!
        if not self.name:
            raise util.CytoflowOpError(
                'name', "You have to set the gate's name "
                "before applying it!")

        if self.name != util.sanitize_identifier(self.name):
            raise util.CytoflowOpError(
                'name',
                "Name can only contain letters, numbers and underscores.")

        if self.name in experiment.data.columns:
            raise util.CytoflowOpError(
                'name',
                "Experiment already has a column named {0}".format(self.name))

        if len(self.channels) == 0:
            raise util.CytoflowOpError('channels',
                                       "Must set at least one channel")

        if not self._peaks:
            raise util.CytoflowOpError(
                None, "No model found.  Did you forget to "
                "call estimate()?")

        for c in self.channels:
            if c not in experiment.data:
                raise util.CytoflowOpError(
                    'channels',
                    "Channel {0} not found in the experiment".format(c))

        for c in self.scale:
            if c not in self.channels:
                raise util.CytoflowOpError(
                    'scale', "Scale set for channel {0}, but it isn't "
                    "in the experiment".format(c))

        for b in self.by:
            if b not in experiment.conditions:
                raise util.CytoflowOpError(
                    'by', "Aggregation metadata {} not found, "
                    "must be one of {}".format(b, experiment.conditions))

        if self.by:
            groupby = experiment.data.groupby(self.by)
        else:
            # use a lambda expression to return a group that contains
            # all the events
            groupby = experiment.data.groupby(lambda _: True)

        event_assignments = pd.Series(["{}_None".format(self.name)] *
                                      len(experiment),
                                      dtype="object")

        # make the statistics
        #         clusters = [x + 1 for x in range(self.num_clusters)]
        #
        #         idx = pd.MultiIndex.from_product([experiment[x].unique() for x in self.by] + [clusters] + [self.channels],
        #                                          names = list(self.by) + ["Cluster"] + ["Channel"])
        #         centers_stat = pd.Series(index = idx, dtype = np.dtype(object)).sort_index()

        for group, data_subset in groupby:
            if len(data_subset) == 0:
                raise util.CytoflowOpError(
                    'by', "Group {} had no data".format(group))

            if group not in self._kmeans:
                raise util.CytoflowOpError(
                    'by', "Group {} not found in the estimated "
                    "model.  Do you need to re-run estimate()?".format(group))

            x = data_subset.loc[:, self.channels[:]]

            for c in self.channels:
                x[c] = self._scale[c](x[c])

            # which values are missing?

            x_na = pd.Series([False] * len(x))
            for c in self.channels:
                x_na[np.isnan(x[c]).values] = True

            x = x.values
            x_na = x_na.values
            group_idx = groupby.groups[group]

            kmeans = self._kmeans[group]

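            # two-level assignment: each event goes to its nearest k-means
            # centroid, and each centroid maps to a merged cluster group;
            # -1 marks events with missing (NaN) channel data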
            predicted_km = np.full(len(x), -1, "int")
            predicted_km[~x_na] = kmeans.predict(x[~x_na])

            groups = np.asarray(self._cluster_group[group])
            predicted_group = np.full(len(x), -1, "int")
            predicted_group[~x_na] = groups[predicted_km[~x_na]]

            # outlier detection code.  this is disabled for the moment
            # because it is really slow.

            #             num_groups = len(set(groups))
            #             if self.find_outliers:
            #                 density = self._density[group]
            #                 max_d = [-1.0 * np.inf] * num_groups
            #
            #                 for xi in range(len(x)):
            #                     if x_na[xi]:
            #                         continue
            #
            #                     x_c = predicted_group[xi]
            #                     d_x_c = density(x[xi])
            #                     if d_x_c > max_d[x_c]:
            #                         max_d[x_c] = d_x_c
            #
            #                 group_density = [None] * num_groups
            #                 group_weight = [0.0] * num_groups
            #
            #                 for c in range(num_groups):
            #                     num_c = np.sum(predicted_group == c)
            #                     clusters = np.argwhere(groups == c).flatten()
            #
            #                     normals = []
            #                     weights = []
            #                     for k in range(len(clusters)):
            #                         num_k = np.sum(predicted_km == k)
            #                         weight_k = num_k / num_c
            #                         group_weight[c] += num_k / len(x)
            #                         weights.append(weight_k)
            #                         normals.append(self._normals[group][k])
            #
            #                     group_density[c] = lambda x, weights = weights, normals = normals: np.sum([w * n(x) for w, n in zip(weights, normals)], axis = 0)
            #
            #                 for xi in range(len(x)):
            #                     if x_na[xi]:
            #                         continue
            #
            #                     x_c = predicted_group[xi]
            #
            #                     if density(x[xi]) / max_d[x_c] < 0.01:
            #                         predicted_group[xi] = -1
            #                         continue
            #
            #                     sum_d = 0
            #                     for c in set(groups):
            #                         sum_d += group_weight[c] * group_density[c](x[xi])
            #
            #                     if group_weight[x_c] * group_density[x_c](x[xi]) / sum_d < 0.8:
            #                         predicted_group[xi] = -1

            #
            #                     max_d = -1.0 * np.inf
            #                     for x_c in x[predicted_group == c]:
            #                         x_c_d = density(x_c)
            #                         if x_c_d > max_d:
            #                             max_d = x_c_d
            #
            #                     for i in range(len(x)):
            #                         if predicted_group[i] == c and density(x[i]) / max_d <= 0.01:
            #                             predicted_group[i] = -1
            #
            #

            predicted_str = pd.Series(["(none)"] * len(predicted_group))
            for c in range(len(self._cluster_group[group])):
                predicted_str[predicted_group == c] = "{0}_{1}".format(
                    self.name, c + 1)
            predicted_str[predicted_group == -1] = "{0}_None".format(self.name)
            predicted_str.index = group_idx

            event_assignments.iloc[group_idx] = predicted_str

        new_experiment = experiment.clone()
        new_experiment.add_condition(self.name, "category", event_assignments)

        #         new_experiment.statistics[(self.name, "centers")] = pd.to_numeric(centers_stat)

        new_experiment.history.append(
            self.clone_traits(transient=lambda _: True))
        return new_experiment

    def default_view(self, **kwargs):
        """
        Returns a diagnostic plot of the Gaussian mixture model.
        
        Parameters
        ----------
        channels : List(Str)
            Which channels to plot?  Must contain either one or two channels.
            
        scale : Dict(Str : Enum("linear", "logicle", "log"))
            How to scale the channels before plotting them
            
        density : bool
            Should we plot a scatterplot or the estimated density function?
         
        Returns
        -------
        IView
            an IView, call :meth:`plot` to see the diagnostic plot.
        """
        channels = kwargs.pop('channels', self.channels)
        scale = kwargs.pop('scale', self.scale)
        density = kwargs.pop('density', False)

        for c in channels:
            if c not in self.channels:
                raise util.CytoflowViewError(
                    'channels',
                    "Channel {} isn't in the operation's channels".format(c))

        for s in scale:
            if s not in self.channels:
                raise util.CytoflowViewError(
                    'scale',
                    "Scale set for channel {}, but it isn't in the "
                    "operation's channels".format(s))

        for c in channels:
            if c not in scale:
                scale[c] = util.get_default_scale()

        if len(channels) == 0:
            raise util.CytoflowViewError(
                'channels',
                "Must specify at least one channel for a default view")
        elif len(channels) == 1:
            v = FlowPeaks1DView(op=self)
            v.trait_set(channel=channels[0],
                        scale=scale[channels[0]],
                        **kwargs)
            return v

        elif len(channels) == 2:
            if density:
                v = FlowPeaks2DDensityView(op=self)
                v.trait_set(xchannel=channels[0],
                            ychannel=channels[1],
                            xscale=scale[channels[0]],
                            yscale=scale[channels[1]],
                            **kwargs)
                return v

            else:
                v = FlowPeaks2DView(op=self)
                v.trait_set(xchannel=channels[0],
                            ychannel=channels[1],
                            xscale=scale[channels[0]],
                            yscale=scale[channels[1]],
                            **kwargs)
                return v
        else:
            raise util.CytoflowViewError(
                None,
                "Can't specify more than two channels for a default view")
class FlowPeaks2DView(By2DView, AnnotatingView, ScatterplotView):
    """
    A two-dimensional diagnostic view for :class:`FlowPeaksOp`.  Plots a 
    scatter-plot of the two channels, then overlays the k-means centroids in 
    blue and the clusters-of-k-means in pink.

    Attributes
    ----------

    """

    id = Constant('edu.mit.synbio.cytoflow.view.flowpeaks2dview')
    friendly_id = Constant("FlowPeaks 2D Diagnostic Plot")

    xchannel = Str
    ychannel = Str
    xscale = util.ScaleEnum
    yscale = util.ScaleEnum

    def plot(self, experiment, **kwargs):
        """
        Plot the scatterplot, then overlay the k-means centroids and peaks.
        
        Parameters
        ----------
        
        """

        if experiment is None:
            raise util.CytoflowViewError('experiment',
                                         "No experiment specified")

        annotations = {}
        for k in self.op._kmeans:
            annotations[k] = (self.op._kmeans[k], self.op._peaks[k],
                              self.op._cluster_peak[k])

        view, trait_name = self._strip_trait(self.op.name)

        if self.xchannel in self.op._scale:
            xscale = self.op._scale[self.xchannel]
        else:
            xscale = util.scale_factory(self.xscale,
                                        experiment,
                                        channel=self.xchannel)

        if self.ychannel in self.op._scale:
            yscale = self.op._scale[self.ychannel]
        else:
            yscale = util.scale_factory(self.yscale,
                                        experiment,
                                        channel=self.ychannel)

        super(FlowPeaks2DView, view).plot(experiment,
                                          annotation_facet=self.op.name,
                                          annotation_trait=trait_name,
                                          annotations=annotations,
                                          xscale=xscale,
                                          yscale=yscale,
                                          **kwargs)

    def _annotation_plot(self, axes, annotation, annotation_facet,
                         annotation_value, annotation_color, **kwargs):

        ix = self.op.channels.index(self.xchannel)
        iy = self.op.channels.index(self.ychannel)

        xscale = kwargs['xscale']
        yscale = kwargs['yscale']

        km = annotation[0]
        peaks = annotation[1]
        cluster_peak = annotation[2]

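        # draw each k-means centroid as a blue star, with a line segment
        # connecting it to the density peak its cluster converged to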
        for k in range(len(km.cluster_centers_)):
            x = xscale.inverse(km.cluster_centers_[k][ix])
            y = yscale.inverse(km.cluster_centers_[k][iy])

            plt.plot(x, y, '*', color='blue')

            peak_idx = cluster_peak[k]
            peak = peaks[peak_idx]
            peak_x = xscale.inverse(peak[0])
            peak_y = yscale.inverse(peak[1])

            plt.plot([x, peak_x], [y, peak_y])

        for peak in peaks:
            x = xscale.inverse(peak[0])
            y = yscale.inverse(peak[1])
            plt.plot(x, y, 'o', color="magenta")
Example #8
class GaussianMixture2DPluginOp(PluginOpMixin, GaussianMixtureOp):
    id = Constant('edu.mit.synbio.cytoflowgui.operations.gaussian_2d')

    handler_factory = Callable(GaussianMixture2DHandler)

    xchannel = Str(estimate=True)
    ychannel = Str(estimate=True)
    xscale = util.ScaleEnum(estimate=True)
    yscale = util.ScaleEnum(estimate=True)

    # add "estimate" metadata
    num_components = util.PositiveCInt(1, estimate=True)
    sigma = util.PositiveCFloat(0.0, allow_zero=True, estimate=True)
    by = List(Str, estimate=True)

    _gmms = Dict(Any, Instance(mixture.GaussianMixture), transient=True)

    # bits to support the subset editor

    subset_list = List(ISubset, estimate=True)
    subset = Property(Str, depends_on="subset_list.str")

    # MAGIC - returns the value of the "subset" Property, above
    def _get_subset(self):
        return " and ".join(
            [subset.str for subset in self.subset_list if subset.str])

    @on_trait_change('subset_list.str')
    def _subset_changed(self, obj, name, old, new):
        self.changed = (Changed.ESTIMATE, ('subset_list', self.subset_list))

    @on_trait_change('xchannel, ychannel')
    def _channel_changed(self):
        self.channels = []
        self.scale = {}
        if self.xchannel:
            self.channels.append(self.xchannel)

            self.scale[self.xchannel] = self.xscale

        if self.ychannel:
            self.channels.append(self.ychannel)

            self.scale[self.ychannel] = self.yscale

    @on_trait_change('xscale, yscale')
    def _scale_changed(self):
        self.scale = {}

        if self.xchannel:
            self.scale[self.xchannel] = self.xscale

        if self.ychannel:
            self.scale[self.ychannel] = self.yscale

    def default_view(self, **kwargs):
        return GaussianMixture2DPluginView(op=self, **kwargs)

    def estimate(self, experiment):
        if not self.xchannel:
            raise util.CytoflowOpError('xchannel', "Must set X channel")

        if not self.ychannel:
            raise util.CytoflowOpError('ychannel', "Must set Y channel")

        super().estimate(experiment, subset=self.subset)
        self.changed = (Changed.ESTIMATE_RESULT, self)

    def clear_estimate(self):
        self._gmms.clear()
        self.changed = (Changed.ESTIMATE_RESULT, self)

    def get_notebook_code(self, idx):
        op = GaussianMixtureOp()
        op.copy_traits(self, op.copyable_trait_names())

        return dedent("""
        op_{idx} = {repr}
        
        op_{idx}.estimate(ex_{prev_idx}{subset})
        ex_{idx} = op_{idx}.apply(ex_{prev_idx})
        """.format(repr=repr(op),
                   idx=idx,
                   prev_idx=idx - 1,
                   subset=", subset = " +
                   repr(self.subset) if self.subset else ""))
Example #9
class RangeSelection(cytoflow.views.HistogramView):
    """Plots, and lets the user interact with, a selection on the X axis.
    
    Is it beautiful?  No.  Does it demonstrate the capabilities I desire?  Yes.
    
    Attributes
    ----------
    op : Instance(RangeOp)
        the RangeOp instance that this view is, well, viewing
        
    huefacet : Str
        The conditioning variable to show multiple colors on this plot
        
    subset : Str
        The string passed to `Experiment.query()` to subset the data before
        plotting
        
    interactive : Bool
        is this view interactive?  I.e., can the user set min and max
        with a mouse drag?
        
    Notes
    -----
    We inherit `xfacet` and `yfacet` from `cytoflow.views.HistogramView`, but
    they must both be unset!
        
    Examples
    --------
    
    In an IPython notebook with `%matplotlib notebook`
    
    >>> r = RangeOp(name = "RangeGate",
    ...             channel = 'Y2-A')
    >>> rv = r.default_view()
    >>> rv.interactive = True
    >>> rv.plot(ex2)
    >>> ### draw a range on the plot ###
    >>> print(r.low, r.high)
    """

    id = Constant('edu.mit.synbio.cytoflow.views.range')
    friendly_id = Constant("Range Selection")

    op = Instance(IOperation)
    name = DelegatesTo('op')
    channel = DelegatesTo('op')
    low = DelegatesTo('op')
    high = DelegatesTo('op')
    interactive = Bool(False, transient=True)

    # internal state.
    _ax = Any(transient=True)
    _span = Instance(SpanSelector, transient=True)
    _cursor = Instance(Cursor, transient=True)
    _low_line = Instance(Line2D, transient=True)
    _high_line = Instance(Line2D, transient=True)
    _hline = Instance(Line2D, transient=True)

    def plot(self, experiment, **kwargs):
        """Plot the underlying histogram and then plot the selection on top of it."""

        if experiment is None:
            raise util.CytoflowViewError("No experiment specified")

        if self.xfacet:
            raise util.CytoflowViewError(
                "RangeSelection.xfacet must be empty or `Undefined`")

        if self.yfacet:
            raise util.CytoflowViewError(
                "RangeSelection.yfacet must be empty or `Undefined`")

        super(RangeSelection, self).plot(experiment, **kwargs)
        self._ax = plt.gca()
        self._draw_span()
        self._interactive()

    @on_trait_change('low,high', post_init=True)
    def _draw_span(self):
        if not (self._ax and self.low and self.high):
            return

        if self._low_line and self._low_line in self._ax.lines:
            self._low_line.remove()

        if self._high_line and self._high_line in self._ax.lines:
            self._high_line.remove()

        if self._hline and self._hline in self._ax.lines:
            self._hline.remove()

        self._low_line = plt.axvline(self.low, linewidth=3, color='blue')
        self._high_line = plt.axvline(self.high, linewidth=3, color='blue')

        ymin, ymax = plt.ylim()
        y = (ymin + ymax) / 2.0
        self._hline = plt.plot([self.low, self.high], [y, y],
                               color='blue',
                               linewidth=2)[0]

        plt.draw()

    @on_trait_change('interactive', post_init=True)
    def _interactive(self):
        if self._ax and self.interactive:
            self._cursor = Cursor(self._ax,
                                  horizOn=False,
                                  vertOn=True,
                                  color='blue',
                                  useblit=True)

            self._span = SpanSelector(self._ax,
                                      onselect=self._onselect,
                                      direction='horizontal',
                                      rectprops={
                                          'alpha': 0.3,
                                          'color': 'grey'
                                      },
                                      span_stays=False,
                                      useblit=True)
        else:
            self._cursor = None
            self._span = None

    def _onselect(self, xmin, xmax):
        """Update selection traits"""
        self.low = xmin
        self.high = xmax
Example #10
class PolygonSelection(Op2DView, ScatterplotView):
    """
    Plots, and lets the user interact with, a 2D polygon selection.
    
    Attributes
    ----------
    interactive : bool
        is this view interactive?  I.e., can the user set the polygon vertices
        with mouse clicks?
        
    Notes
    -----
    We inherit :attr:`xfacet` and :attr:`yfacet` from 
    :class:`cytoflow.views.ScatterPlotView`, but they must both be unset!
        
    Examples
    --------

    In a Jupyter notebook with `%matplotlib notebook`
    
    >>> p = flow.PolygonOp(name = "Polygon",
    ...                    xchannel = "V2-A",
    ...                    ychannel = "Y2-A")
    >>> poly = p.default_view()
    >>> poly.plot(ex2)
    >>> poly.interactive = True
    """

    id = Constant('edu.mit.synbio.cytoflow.views.polygon')
    friendly_id = Constant("Polygon Selection")

    xfacet = Constant(None)
    yfacet = Constant(None)

    xscale = DelegatesTo('op', prefix='_xscale')
    yscale = DelegatesTo('op', prefix='_yscale')

    interactive = Bool(False, transient=True)

    # internal state.
    _ax = Any(transient=True)
    _widget = Instance(util.PolygonSelector, transient=True)
    _patch = Instance(mpl.patches.PathPatch, transient=True)

    def plot(self, experiment, **kwargs):
        """
        Plot the scatter plot, and then plot the selection on top of it.
        
        Parameters
        ----------
        
        """

        super(PolygonSelection, self).plot(experiment, **kwargs)
        self._ax = plt.gca()
        self._draw_poly()
        self._interactive()

    @on_trait_change('op.vertices', post_init=True)
    def _draw_poly(self):
        if not self._ax:
            return

        if self._patch and self._patch in self._ax.patches:
            self._patch.remove()

        if not self.op.vertices or len(self.op.vertices) < 3:
            return

        patch_vert = np.concatenate(
            (np.array(self.op.vertices), np.array((0, 0), ndmin=2)))

        self._patch = \
            mpl.patches.PathPatch(mpl.path.Path(patch_vert, closed = True),
                                  edgecolor="black",
                                  linewidth = 2,
                                  fill = False)

        self._ax.add_patch(self._patch)
        plt.draw()

    @on_trait_change('interactive', post_init=True)
    def _interactive(self):
        if self._ax and self.interactive:
            self._widget = util.PolygonSelector(self._ax,
                                                self._onselect,
                                                useblit=True)
        elif self._widget:
            self._widget = None

    def _onselect(self, vertices):
        self.op.vertices = vertices
Example #11
class PolygonOp(HasStrictTraits):
    """
    Apply a polygon gate to a cytometry experiment.
    
    Attributes
    ----------
    name : Str
        The operation name.  Used to name the new metadata field in the
        experiment that's created by :meth:`apply`
        
    xchannel : Str
        The name of the x channel to apply the gate.
        
    ychannel : Str
        The name of the y channel to apply the gate.
        
    vertices : List((Float, Float))
        The polygon vertices.  An ordered list of 2-tuples, representing
        the x and y coordinates of the vertices.
        
    Notes
    -----
    This module uses :class:`matplotlib.path.Path` to represent the polygon, because
    membership testing is very fast.
    
    You can set the vertices by hand, I suppose, but it's much easier to use
    the interactive view you get from :meth:`default_view` to do so.
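
    For instance, the membership test at the heart of :meth:`apply` boils
    down to the following (a sketch with made-up vertices and points, not
    the operation itself):
    
    >>> import numpy as np
    >>> import matplotlib as mpl
    >>> path = mpl.path.Path(np.array([(0., 0.), (1., 0.), (1., 1.)]))
    >>> path.contains_points([(0.9, 0.5), (0.1, 0.9)])
    array([ True, False])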
    
    Examples
    --------
    
    .. plot::
        :context: close-figs
        
        Make a little data set.
    
        >>> import cytoflow as flow
        >>> import_op = flow.ImportOp()
        >>> import_op.tubes = [flow.Tube(file = "Plate01/RFP_Well_A3.fcs",
        ...                              conditions = {'Dox' : 10.0}),
        ...                    flow.Tube(file = "Plate01/CFP_Well_A4.fcs",
        ...                              conditions = {'Dox' : 1.0})]
        >>> import_op.conditions = {'Dox' : 'float'}
        >>> ex = import_op.apply()
    
    Create and parameterize the operation.
    
    .. plot::
        :context: close-figs
        
        >>> p = flow.PolygonOp(name = "Polygon",
        ...                    xchannel = "V2-A",
        ...                    ychannel = "Y2-A")
        >>> p.vertices = [(23.411982294776319, 5158.7027015021222), 
        ...               (102.22182270573683, 23124.058843387455), 
        ...               (510.94519955277201, 23124.058843387455), 
        ...               (1089.5215641232173, 3800.3424832180476), 
        ...               (340.56382570202402, 801.98947404942271), 
        ...               (65.42597937575897, 1119.3133482602157)]

        
    Show the default view.  

    .. plot::
        :context: close-figs
            
        >>> p.default_view(huefacet = "Dox",
        ...                xscale = 'log',
        ...                yscale = 'log').plot(ex)
        
    Apply the gate, and show the result
    
    .. plot::
        :context: close-figs
        
        >>> ex2 = p.apply(ex)
        >>> ex2.data.groupby('Polygon').size()
        Polygon
        False    15875
        True      4125
        dtype: int64
            
    """

    # traits
    id = Constant('edu.mit.synbio.cytoflow.operations.polygon')
    friendly_id = Constant("Polygon")

    name = CStr()
    xchannel = Str()
    ychannel = Str()
    vertices = List((Float, Float))

    _xscale = util.ScaleEnum()
    _yscale = util.ScaleEnum()

    def apply(self, experiment):
        """Applies the threshold to an experiment.
        
        Parameters
        ----------
        experiment : Experiment
            the old :class:`Experiment` to which this op is applied
            
        Returns
        -------
        Experiment
            a new :class:`Experiment`, the same as ``experiment`` but with 
            a new column of type ``bool`` with the same name as the operation.  
            The bool is ``True`` if the event's measurement is within the 
            polygon, and ``False`` otherwise.
            
        Raises
        ------
        util.CytoflowOpError
            if for some reason the operation can't be applied to this
            experiment. The reason is in :attr:`.CytoflowOpError.args`
        """

        if experiment is None:
            raise util.CytoflowOpError('experiment', "No experiment specified")

        if self.name in experiment.data.columns:
            raise util.CytoflowOpError(
                'name', "{} is in the experiment already!".format(self.name))

        if not self.xchannel:
            raise util.CytoflowOpError('xchannel', "Must specify an x channel")

        if not self.ychannel:
            raise util.CytoflowOpError('ychannel', "Must specify a y channel")

        if self.xchannel not in experiment.channels:
            raise util.CytoflowOpError(
                'xchannel',
                "xchannel {0} is not in the experiment".format(self.xchannel))

        if self.ychannel not in experiment.channels:
            raise util.CytoflowOpError(
                'ychannel',
                "ychannel {0} is not in the experiment".format(self.ychannel))

        if len(self.vertices) < 3:
            raise util.CytoflowOpError('vertices',
                                       "Must have at least 3 vertices")

        if any([len(x) != 2 for x in self.vertices]):
            raise util.CytoflowOpError(
                'vertices', "All vertices must be lists or tuples "
                "of length = 2")

        # make sure name got set!
        if not self.name:
            raise util.CytoflowOpError(
                'name', "You have to set the Polygon gate's name "
                "before applying it!")

        # there's a bit of a subtlety here: if the vertices were
        # selected with an interactive plot, and that plot had scaled
        # axes, we need to apply that scale function to both the
        # vertices and the data before looking for path membership
        xscale = util.scale_factory(self._xscale,
                                    experiment,
                                    channel=self.xchannel)
        yscale = util.scale_factory(self._yscale,
                                    experiment,
                                    channel=self.ychannel)

        vertices = [(xscale(x), yscale(y)) for (x, y) in self.vertices]
        data = experiment.data[[self.xchannel, self.ychannel]].copy()
        data[self.xchannel] = xscale(data[self.xchannel])
        data[self.ychannel] = yscale(data[self.ychannel])

        # use a matplotlib Path because testing for membership is a fast C fn.
        path = mpl.path.Path(np.array(vertices))
        xy_data = data[[self.xchannel, self.ychannel]].values

        new_experiment = experiment.clone()
        new_experiment.add_condition(self.name, "bool",
                                     path.contains_points(xy_data))
        new_experiment.history.append(
            self.clone_traits(transient=lambda t: True))

        return new_experiment

    def default_view(self, **kwargs):
        p = PolygonSelection(op=self)
        p.trait_set(**kwargs)
        return p
Example #12
class Kde2DView(Base2DView):
    """
    Plots a 2-d kernel-density estimate.  Sort of like a smoothed histogram.
    The density is visualized with a set of isolines.
        
    Attributes
    ----------

    Examples
    --------
    
    Make a little data set.
    
    .. plot::
        :context: close-figs
            
        >>> import cytoflow as flow
        >>> import_op = flow.ImportOp()
        >>> import_op.tubes = [flow.Tube(file = "Plate01/RFP_Well_A3.fcs",
        ...                              conditions = {'Dox' : 10.0}),
        ...                    flow.Tube(file = "Plate01/CFP_Well_A4.fcs",
        ...                              conditions = {'Dox' : 1.0})]
        >>> import_op.conditions = {'Dox' : 'float'}
        >>> ex = import_op.apply()
        
    Plot a density plot
    
    .. plot::
        :context: close-figs
    
        >>> flow.Kde2DView(xchannel = 'V2-A',
        ...                xscale = 'log',
        ...                ychannel = 'Y2-A',
        ...                yscale = 'log',
        ...                huefacet = 'Dox').plot(ex)
    """
    
    id = Constant('edu.mit.synbio.cytoflow.view.kde2d')
    friendly_id = Constant("2D Kernel Density Estimate")
    
    def plot(self, experiment, **kwargs):
        """
        Plot a faceted 2d kernel density estimate
        
        Parameters
        ----------
        shade : bool
            Shade the interior of the isoplot?  (default = `False`)
            
        min_alpha, max_alpha : float
            The minimum and maximum alpha blending values of the isolines,
            between 0 (transparent) and 1 (opaque).
            
        n_levels : int
            How many isolines to draw? (default = 10)
                
        bw : str or float
            The bandwidth for the gaussian kernel, controls how lumpy or smooth the
            kernel estimate is.  Choices are:
            
                - ``scott`` (the default) - ``1.059 * A * nobs ** (-1/5.)``, where ``A`` is ``min(std(X),IQR/1.34)``
                - ``silverman`` - ``.9 * A * nobs ** (-1/5.)``, where ``A`` is ``min(std(X),IQR/1.34)``
                
            If a float is given, it is the bandwidth.  Note, this is in 
            scaled units, not data units.  (A worked sketch of the ``scott``
            rule appears in ``Notes``, below.)
            
        gridsize : int
            How many times to compute the kernel on each axis?  (default: 100)
        
        Notes
        -----
        Other ``kwargs`` are passed to `matplotlib.axes.Axes.contour <https://matplotlib.org/api/_as_gen/matplotlib.axes.Axes.contour.html>`_
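
        As a concrete sketch of the ``scott`` bandwidth rule above (on
        synthetic data; ``x`` stands in for one channel's scaled values):
        
        >>> import numpy as np
        >>> import scipy.stats
        >>> x = np.random.RandomState(0).normal(size = 1000)
        >>> A = min(np.std(x), scipy.stats.iqr(x) / 1.34)
        >>> bw = 1.059 * A * len(x) ** (-1. / 5.)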

        """
        
        super().plot(experiment, **kwargs)
        
    def _grid_plot(self, experiment, grid, **kwargs):

        
        kwargs.setdefault('shade', False)
        kwargs.setdefault('min_alpha', 0.2)
        kwargs.setdefault('max_alpha', 0.9)
        kwargs.setdefault('n_levels', 10)
        
        lim = kwargs.pop('lim')
        xlim = lim[self.xchannel]
        ylim = lim[self.ychannel]
        
        scale = kwargs.pop('scale')
        xscale = scale[self.xchannel]
        yscale = scale[self.ychannel]

        legend_data = {}

        grid.map(_bivariate_kdeplot, 
                 self.xchannel, 
                 self.ychannel, 
                 xscale = xscale, 
                 yscale = yscale, 
                 legend_data = legend_data,
                 **kwargs)
                
        return dict(xlim = xlim,
                    xscale = xscale,
                    ylim = ylim,
                    yscale = yscale,
                    legend_data = legend_data)
Example #13
class ChannelStatisticOp(HasStrictTraits):
    """
    Apply a function to subsets of a data set, and add it as a statistic
    to the experiment.
    
    The :meth:`apply` function groups the data by the variables in :attr:`by`, 
    then applies the :attr:`function` callable to the :attr:`channel` series 
    in each subset.  The callable should take a single :class:`pandas.Series` 
    as an argument.  The return type is arbitrary, but to be used with the rest 
    of :mod:`cytoflow` it should probably be a numeric type or an iterable of 
    numeric types.
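
    Conceptually, this is a thin wrapper around a :mod:`pandas` group-by
    (a sketch with made-up data, not the operation itself):
    
    >>> import pandas as pd, numpy as np
    >>> data = pd.DataFrame({'Dox' : [1., 1., 10., 10.], 'Y2-A' : [1., 3., 5., 7.]})
    >>> data.groupby(['Dox'])['Y2-A'].apply(np.mean)
    Dox
    1.0     2.0
    10.0    6.0
    Name: Y2-A, dtype: float64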
    
    Attributes
    ----------
    name : Str
        The operation name.  Becomes the first element in the
        :attr:`.Experiment.statistics` key tuple.
    
    channel : Str
        The channel to apply the function to.
        
    function : Callable
        The function used to compute the statistic.  :attr:`function` must take 
        a :class:`pandas.Series` as its only parameter.  The return type is 
        arbitrary, but to be used with the rest of :mod:`cytoflow` it should 
        probably be a numeric type or an iterable of numeric types.  If 
        :attr:`statistic_name` is unset, the name of the function becomes the 
        second in element in the :attr:`.Experiment.statistics` key tuple.
        
        .. warning::
            Be careful!  Sometimes this function is called with an empty input!
            If this is the case, poorly-behaved functions can return ``NaN`` or 
            throw an error.  If this happens, it will be reported.
        
    statistic_name : Str
        The name of the function; if present, becomes the second element in
        the :attr:`.Experiment.statistics` key tuple.  Particularly useful if 
        :attr:`function` is a lambda expression.
        
    by : List(Str)
        A list of metadata attributes to aggregate the data before applying the
        function.  For example, if the experiment has two pieces of metadata,
        ``Time`` and ``Dox``, setting ``by = ["Time", "Dox"]`` will apply 
        :attr:`function` separately to each subset of the data with a unique 
        combination of ``Time`` and ``Dox``.
        
    subset : Str
        A Python expression sent to :meth:`.Experiment.query` to subset the 
        data before computing the statistic.
        
    fill : Any (default = 0)
        The value to use in the statistic if a slice of the data is empty.
   
    Examples
    --------
    
    .. plot::
        :context: close-figs
        
        Make a little data set.
    
        >>> import cytoflow as flow
        >>> import_op = flow.ImportOp()
        >>> import_op.tubes = [flow.Tube(file = "Plate01/RFP_Well_A3.fcs",
        ...                              conditions = {'Dox' : 10.0}),
        ...                    flow.Tube(file = "Plate01/CFP_Well_A4.fcs",
        ...                              conditions = {'Dox' : 1.0})]
        >>> import_op.conditions = {'Dox' : 'float'}
        >>> ex = import_op.apply()
    
    Create and parameterize the operation.
    
    .. plot::
        :context: close-figs
        
        >>> ch_op = flow.ChannelStatisticOp(name = 'MeanByDox',
        ...                     channel = 'Y2-A',
        ...                     function = flow.geom_mean,
        ...                     by = ['Dox'])
        >>> ex2 = ch_op.apply(ex)
        
    View the new statistic
    
    >>> print(ex2.statistics.keys())
    dict_keys([('MeanByDox', 'geom_mean')])

    >>> print(ex2.statistics[('MeanByDox', 'geom_mean')])
    Dox
    1.0      19.805601
    10.0    446.981927
    dtype: float64

    """
    
    id = Constant('edu.mit.synbio.cytoflow.operations.channel_statistic')
    friendly_id = Constant("Channel Statistics")
    
    name = CStr
    channel = Str
    function = Callable
    statistic_name = Str
    by = List(Str)
    subset = Str
    fill = Any(0)
    
    def apply(self, experiment):
        """
        Apply the operation to an :class:`.Experiment`.
        
        Parameters
        ----------
        experiment
            The :class:`.Experiment` to apply this operation to.
            
        Returns
        -------
        Experiment
            A new :class:`.Experiment`, containing a new entry in 
            :attr:`.Experiment.statistics`.  The key of the new entry is a 
            tuple ``(name, function)`` (or ``(name, statistic_name)`` if 
            :attr:`statistic_name` is set).
        """
        
        if experiment is None:
            raise util.CytoflowOpError('experiment', "Must specify an experiment")

        if not self.name:
            raise util.CytoflowOpError('name', "Must specify a name")
        
        if not self.channel:
            raise util.CytoflowOpError('channel', "Must specify a channel")

        if not self.function:
            raise util.CytoflowOpError('function', "Must specify a function")

        if self.channel not in experiment.data:
            raise util.CytoflowOpError('channel',
                                       "Channel {0} not found in the experiment"
                                       .format(self.channel))
            
        if not self.by:
            raise util.CytoflowOpError('by',
                                       "Must specify some grouping conditions "
                                       "in 'by'")
            
        stat_name = (self.name, self.statistic_name) \
                     if self.statistic_name \
                     else (self.name, self.function.__name__)
                     
        if stat_name in experiment.statistics:
            raise util.CytoflowOpError('name',
                                       "{} is already in the experiment's statistics"
                                       .format(stat_name))

        new_experiment = experiment.clone()
        if self.subset:
            try:
                experiment = experiment.query(self.subset)
            except Exception as exc:
                raise util.CytoflowOpError('subset',
                                           "Subset string '{0}' isn't valid"
                                           .format(self.subset)) from exc
                
            if len(experiment) == 0:
                raise util.CytoflowOpError('subset',
                                           "Subset string '{0}' returned no events"
                                           .format(self.subset))
       
        for b in self.by:
            if b not in experiment.conditions:
                raise util.CytoflowOpError('by',
                                           "Aggregation metadata {} not found, "
                                           "must be one of {}"
                                           .format(b, experiment.conditions))
            unique = experiment.data[b].unique()

            if len(unique) == 1:
                warn("Only one category for {}".format(b), util.CytoflowOpWarning)

        groupby = experiment.data.groupby(self.by)

        for group, data_subset in groupby:
            if len(data_subset) == 0:
                warn("Group {} had no data"
                     .format(group), 
                     util.CytoflowOpWarning)
                
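        # build the full cross-product index, so that every combination of the
        # 'by' conditions gets a row; combinations with no events keep the
        # 'fill' value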
        idx = pd.MultiIndex.from_product([experiment[x].unique() for x in self.by], 
                                         names = self.by)

        stat = pd.Series(data = [self.fill] * len(idx),
                         index = idx, 
                         name = "{} : {}".format(stat_name[0], stat_name[1]),
                         dtype = np.dtype(object)).sort_index()
        
        for group, data_subset in groupby:
            if len(data_subset) == 0:
                continue
            
            if not isinstance(group, tuple):
                group = (group,)
            
            try:
                stat.loc[group] = self.function(data_subset[self.channel])
            except Exception as e:
                raise util.CytoflowOpError(None,
                                           "Your function threw an error in group {}"
                                           .format(group)) from e
            
            # check for, and warn about, NaNs.
            if stat.loc[list(group)].isna().any():
                warn("Found NaN in category {} returned {}"
                     .format(group, stat.loc[group]), 
                     util.CytoflowOpWarning)
                    
        # try to convert to numeric, but if there are non-numeric bits ignore
        stat = pd.to_numeric(stat, errors = 'ignore')
        
        new_experiment.history.append(self.clone_traits(transient = lambda _: True))
        new_experiment.statistics[stat_name] = stat
        
        return new_experiment
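
A minimal usage sketch building on the docstring example above; the second
condition ``Time`` and the ``subset`` expression are hypothetical additions,
not part of the example data:

>>> op = flow.ChannelStatisticOp(name = 'MeanByDoxTime',
...                              channel = 'Y2-A',
...                              function = flow.geom_mean,
...                              by = ['Dox', 'Time'],
...                              subset = 'Dox > 1.0')
>>> ex3 = op.apply(ex2)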
Example #14
class Stats1DView(Base1DStatisticsView):
    """
    Plot a statistic.  The value of the statistic will be plotted on the
    Y axis; a numeric conditioning variable must be chosen for the X axis.
    Every variable in the statistic must be specified as either the `variable`
    or one of the plot facets.
    
    Attributes
    ----------
    variable_scale : {'linear', 'log', 'logicle'}
        The scale applied to the variable (on the X axis)
        
    Examples
    --------
    
    .. plot::
        :context: close-figs
        
        Make a little data set.
    
        >>> import cytoflow as flow
        >>> import_op = flow.ImportOp()
        >>> import_op.tubes = [flow.Tube(file = "Plate01/RFP_Well_A3.fcs",
        ...                              conditions = {'Dox' : 10.0}),
        ...                    flow.Tube(file = "Plate01/CFP_Well_A4.fcs",
        ...                              conditions = {'Dox' : 1.0})]
        >>> import_op.conditions = {'Dox' : 'float'}
        >>> ex = import_op.apply()
    
    Create a new statistic.
    
    .. plot::
        :context: close-figs
        
        >>> ch_op = flow.ChannelStatisticOp(name = 'MeanByDox',
        ...                     channel = 'Y2-A',
        ...                     function = flow.geom_mean,
        ...                     by = ['Dox'])
        >>> ex2 = ch_op.apply(ex)
        
    View the new statistic
    
    .. plot::
        :context: close-figs
        
        >>> flow.Stats1DView(variable = 'Dox',
        ...                  statistic = ('MeanByDox', 'geom_mean'),
        ...                  variable_scale = 'log',
        ...                  scale = 'log').plot(ex2)
    """

    # traits
    id = Constant("edu.mit.synbio.cytoflow.view.stats1d")
    friendly_id = Constant("1D Statistics View")

    REMOVED_ERROR = Constant(
        "Statistics changed dramatically in 0.5; please see the documentation")
    by = util.Removed(err_string=REMOVED_ERROR)
    yfunction = util.Removed(err_string=REMOVED_ERROR)
    ychannel = util.Removed(err_string=REMOVED_ERROR)
    xvariable = util.Deprecated(new="variable")
    xscale = util.Deprecated(new='variable_scale')

    variable_scale = util.ScaleEnum

    def enum_plots(self, experiment):
        """
        Returns an iterator over the possible plots that this View can
        produce.  The values returned can be passed to :meth:`plot`.
        """

        return super().enum_plots(experiment)

    def plot(self, experiment, plot_name=None, **kwargs):
        """Plot a chart of a variable's values against a statistic.
        
        Parameters
        ----------
        
        variable_lim : (float, float)
            The limits on the variable axis
        
        color : a matplotlib color
            The color to plot with.  Overridden if `huefacet` is not `None`
            
        linewidth : float
            The width of the line, in points
            
        linestyle : ['solid' | 'dashed' | 'dashdot' | 'dotted' | (offset, on-off-dash-seq) | '-' | '--' | '-.' | ':' | 'None' | ' ' | '']
            
        marker : a matplotlib marker style
            See http://matplotlib.org/api/markers_api.html#module-matplotlib.markers
            
        markersize : int
            The marker size in points
            
        markerfacecolor : a matplotlib color
            The color to make the markers.  Overridden if `huefacet` is not `None`
            
        alpha : float
            The alpha blending value, from 0.0 (transparent) to 1.0 (opaque)
        
        capsize : scalar
            The size of the error bar caps, in points
            
        shade_error : bool
            If `False` (the default), plot the error statistic as traditional 
            "error bars."  If `True`, plot error statistic as a filled, shaded
            region.
            
        shade_alpha : float
            The transparency of the shaded error region, from 0.0 (transparent)
            to 1.0 (opaque).  Default is 0.2.
        
        Notes
        -----
                
        Other `kwargs` are passed to `matplotlib.pyplot.plot <https://matplotlib.org/devdocs/api/_as_gen/matplotlib.pyplot.plot.html>`_
        
        """

        if experiment is None:
            raise util.CytoflowViewError('experiment',
                                         "No experiment specified")

        if self.variable not in experiment.conditions:
            raise util.CytoflowViewError(
                'variable',
                "Variable {} not in the experiment".format(self.variable))

        if not util.is_numeric(experiment[self.variable]):
            raise util.CytoflowViewError(
                'variable',
                "Variable {} must be numeric".format(self.variable))

        variable_scale = util.scale_factory(self.variable_scale,
                                            experiment,
                                            condition=self.variable)

        super().plot(experiment,
                     plot_name,
                     variable_scale=variable_scale,
                     **kwargs)

    def _grid_plot(self, experiment, grid, **kwargs):

        data = grid.data
        data_scale = kwargs.pop('scale')
        variable_scale = kwargs.pop('variable_scale')

        stat = experiment.statistics[self.statistic]
        stat_name = stat.name
        if self.error_statistic[0]:
            err_stat = experiment.statistics[self.error_statistic]
            err_stat_name = err_stat.name
        else:
            err_stat = None

        variable_lim = kwargs.pop("variable_lim", None)
        if variable_lim is None:
            variable_lim = (variable_scale.clip(
                data[self.variable].min() *
                0.9), variable_scale.clip(data[self.variable].max() * 1.1))

        lim = kwargs.pop("lim", None)
        if lim is None:
            lim = (data_scale.clip(data[stat_name].min() * 0.9),
                   data_scale.clip(data[stat_name].max() * 1.1))

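            # if an error statistic is attached, widen the limits so the error
            # bars fit: first treat each entry as a (low, high) interval, and
            # fall back to treating it as a symmetric magnitude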
            if self.error_statistic[0]:
                try:
                    lim = (data_scale.clip(
                        min([x[0] for x in data[err_stat_name]]) * 0.9),
                           data_scale.clip(
                               max([x[1] for x in data[err_stat_name]]) * 1.1))
                except (TypeError, IndexError):
                    lim = (data_scale.clip(
                        (data[stat_name].min() - data[err_stat_name].min()) *
                        0.9),
                           data_scale.clip((data[stat_name].max() +
                                            data[err_stat_name].max()) * 1.1))

        orientation = kwargs.pop('orientation', 'vertical')
        capsize = kwargs.pop('capsize', None)
        shade_error = kwargs.pop('shade_error', False)
        shade_alpha = kwargs.pop('shade_alpha', 0.2)

        if orientation == 'vertical':
            # plot the error bars first so the axis labels don't get overwritten
            if err_stat is not None:
                if shade_error:
                    grid.map(_v_error_shade,
                             self.variable,
                             stat_name,
                             err_stat_name,
                             alpha=shade_alpha)
                else:
                    grid.map(_v_error_bars,
                             self.variable,
                             stat_name,
                             err_stat_name,
                             capsize=capsize)

            grid.map(plt.plot, self.variable, stat_name, **kwargs)

            return dict(xscale=variable_scale,
                        xlim=variable_lim,
                        yscale=data_scale,
                        ylim=lim)
        else:
            # plot the error bars first so the axis labels don't get overwritten
            if err_stat is not None:
                if shade_error:
                    grid.map(_h_error_shade,
                             stat_name,
                             self.variable,
                             err_stat_name,
                             alpha=shade_alpha)
                else:
                    grid.map(_h_error_bars,
                             stat_name,
                             self.variable,
                             err_stat_name,
                             capsize=capsize)

            grid.map(plt.plot, stat_name, self.variable, **kwargs)

            return dict(yscale=variable_scale,
                        ylim=variable_lim,
                        xscale=data_scale,
                        xlim=lim)
Example #15
class TestClass(HasTraits):
    c_atr_1 = Constant([1, 2, 3, 4, 5])
    c_atr_2 = Constant({"a": 1, "b": 2})
Example #16
class RangeOp(HasStrictTraits):
    """Apply a range gate to a cytometry experiment.
    
    Attributes
    ----------
    name : Str
        The operation name.  Used to name the new metadata field in the
        experiment that's created by apply()
        
    channel : Str
        The name of the channel to apply the range gate.
        
    low : Float
        The lowest value to include in this gate.
        
    high : Float
        The highest value to include in this gate.
        
    Examples
    --------
    >>> range = flow.RangeOp()
    >>> range.name = "Y2-A+"
    >>> range.channel = 'Y2-A'
    >>> range.low = 0.3
    >>> range.high = 0.8
    >>> 
    >>> ex3 = range.apply(ex2)
    
    Alternately (in an IPython notebook with ``%matplotlib notebook``)
    
    >>> r = RangeOp(name = 'Y2-A+',
    ...             channel = 'Y2-A')
    >>> rv = r.default_view()
    >>> rv.interactive = True
    >>> rv.plot(ex2)
    >>> ### draw a range on the plot ###
    >>> ex3 = r.apply(ex2)
    """

    # traits
    id = Constant('edu.mit.synbio.cytoflow.operations.range')
    friendly_id = Constant('Range')

    name = CStr()
    channel = Str()
    low = CFloat()
    high = CFloat()

    def apply(self, experiment):
        """Applies the threshold to an experiment.
        
        Parameters
        ----------
        experiment : Experiment
            the old_experiment to which this op is applied
            
        Returns
        -------
            a new experiment, the same as old_experiment but with a new
            column the same as the operation name.  The bool is True if the
            event's measurement in self.channel is greater than self.low and
            less than self.high; it is False otherwise.
        """

        if experiment is None:
            raise util.CytoflowOpError("No experiment specified")

        # make sure name got set!
        if not self.name:
            raise util.CytoflowOpError("You have to set the gate's name "
                                       "before applying it!")

        if self.name in experiment.data.columns:
            raise util.CytoflowOpError(
                "Experiment already has a column named {0}".format(self.name))

        if not self.channel:
            raise util.CytoflowOpError("Channel not specified")

        if self.channel not in experiment.channels:
            raise util.CytoflowOpError(
                "Channel {0} not in the experiment".format(self.channel))

        if self.high <= self.low:
            raise util.CytoflowOpError("range high must be > range low")

        if self.high <= experiment[self.channel].min():
            raise util.CytoflowOpError("range high must be > {0}".format(
                experiment[self.channel].min()))
        if self.low >= experiment[self.channel].max():
            raise util.CytoflowOpError("range low must be < {0}".format(
                experiment[self.channel].max()))

        gate = experiment[self.channel].between(self.low, self.high)
        new_experiment = experiment.clone()
        new_experiment.add_condition(self.name, "bool", gate)
        new_experiment.history.append(
            self.clone_traits(transient=lambda _: True))

        return new_experiment

    def default_view(self, **kwargs):
        return RangeSelection(op=self, **kwargs)
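
The gate itself is a pandas ``between`` call (see ``apply`` above).  A minimal
sketch of what it computes, with made-up values:

>>> import pandas as pd
>>> df = pd.DataFrame({'Y2-A' : [0.1, 0.5, 0.9]})
>>> df['Y2-A'].between(0.3, 0.8).tolist()
[False, True, False]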
Example #17
class FlowPeaks2DDensityView(By2DView, AnnotatingView, NullView):
    """
    A two-dimensional diagnostic view for :class:`FlowPeaksOp`.  Plots the
    estimated density function of the two channels, then overlays the k-means 
    centroids in blue and the clusters-of-k-means in pink.

    Attributes
    ----------    
        
    """

    id = Constant('edu.mit.synbio.cytoflow.view.flowpeaks2ddensityview')
    friendly_id = Constant("FlowPeaks 2D Diagnostic Plot (Density)")

    xchannel = Str
    ychannel = Str
    xscale = util.ScaleEnum
    yscale = util.ScaleEnum
    huefacet = Constant(None)

    def plot(self, experiment, **kwargs):
        """
        Plot the estimated density function with the cluster annotations.
        
        Parameters
        ----------
        """

        if experiment is None:
            raise util.CytoflowViewError('experiment',
                                         "No experiment specified")

        if not self.op._kmeans:
            raise util.CytoflowViewError(
                None, "Must estimate a model before plotting "
                "the density plot.")

        annotations = {}
        for k in self.op._kmeans:
            annotations[k] = (self.op._kmeans[k], self.op._peaks[k],
                              self.op._cluster_peak[k], self.op._density[k])

        if self.xchannel in self.op._scale:
            xscale = self.op._scale[self.xchannel]
        else:
            xscale = util.scale_factory(self.xscale,
                                        experiment,
                                        channel=self.xchannel)

        if self.ychannel in self.op._scale:
            yscale = self.op._scale[self.ychannel]
        else:
            yscale = util.scale_factory(self.yscale,
                                        experiment,
                                        channel=self.ychannel)

        super().plot(experiment,
                     annotations=annotations,
                     xscale=xscale,
                     yscale=yscale,
                     **kwargs)

    def _grid_plot(self, experiment, grid, **kwargs):
        # all the real plotting happens in _annotation_plot.  this just sets some
        # defaults and then stores them for later.

        kwargs.setdefault('antialiased', False)
        kwargs.setdefault('linewidth', 0)
        kwargs.setdefault('edgecolors', 'face')
        kwargs.setdefault('shading', 'auto')
        kwargs.setdefault('cmap', plt.get_cmap('viridis'))

        xscale = kwargs['scale'][self.xchannel]
        xlim = kwargs['lim'][self.xchannel]
        yscale = kwargs['scale'][self.ychannel]
        ylim = kwargs['lim'][self.ychannel]

        # can't modify colormaps in place
        cmap = copy.copy(kwargs['cmap'])

        under_color = kwargs.pop('under_color', None)
        if under_color is not None:
            cmap.set_under(color=under_color)
        else:
            cmap.set_under(color=cmap(0.0))

        bad_color = kwargs.pop('bad_color', None)
        if bad_color is not None:
            cmap.set_bad(color=bad_color)

        gridsize = kwargs.pop('gridsize', 50)
        xbins = xscale.inverse(
            np.linspace(xscale(xlim[0]), xscale(xlim[1]), gridsize))
        ybins = yscale.inverse(
            np.linspace(yscale(ylim[0]), yscale(ylim[1]), gridsize))

        for (i, j, _), _ in grid.facet_data():
            ax = grid.facet_axis(i, j)
            ax.fp_xbins = xbins
            ax.fp_ybins = ybins
            ax.fp_keywords = kwargs

        super()._grid_plot(experiment, grid, **kwargs)

        return dict(xscale=xscale,
                    xlim=xlim,
                    yscale=yscale,
                    ylim=ylim,
                    cmap=kwargs['cmap'])

    def _annotation_plot(self, axes, annotation, annotation_facet,
                         annotation_value, annotation_color, **kwargs):

        km = annotation[0]
        peaks = annotation[1]
        cluster_peak = annotation[2]
        density = annotation[3]

        xbins = axes.fp_xbins
        ybins = axes.fp_ybins
        kwargs = axes.fp_keywords

        # get rid of some kwargs that confuse pcolormesh
        kwargs.pop('annotations', None)
        kwargs.pop('annotation_facet', None)
        kwargs.pop('plot_name', None)

        xscale = kwargs['scale'][self.xchannel]
        yscale = kwargs['scale'][self.ychannel]

        kwargs.pop('scale')
        kwargs.pop('lim')

        smoothed = kwargs.pop('smoothed', False)
        smoothed_sigma = kwargs.pop('smoothed_sigma', 1)

        h = density(util.cartesian([xscale(xbins), yscale(ybins)]))
        h = np.reshape(h, (len(xbins), len(ybins)))
        if smoothed:
            h = scipy.ndimage.gaussian_filter(h, sigma=smoothed_sigma)
        axes.pcolormesh(xbins, ybins, h.T, **kwargs)

        ix = self.op.channels.index(self.xchannel)
        iy = self.op.channels.index(self.ychannel)

        for k in range(len(km.cluster_centers_)):

            x = self.op._scale[self.xchannel].inverse(
                km.cluster_centers_[k][ix])
            y = self.op._scale[self.ychannel].inverse(
                km.cluster_centers_[k][iy])

            plt.plot(x, y, '*', color='blue')

            peak_idx = cluster_peak[k]
            peak = peaks[peak_idx]
            peak_x = xscale.inverse(peak[0])
            peak_y = yscale.inverse(peak[1])

            plt.plot([x, peak_x], [y, peak_y])

        for peak in peaks:
            x = xscale.inverse(peak[0])
            y = yscale.inverse(peak[1])
            plt.plot(x, y, 'o', color="magenta")
Example #18
class MATS3DMplDamageODF(MATS3DEval):

    epsilon_0 = Float(59.0e-6,
                      label="epsilon_0",
                      desc="strain at the onset of damage",
                      enter_set=True,
                      auto_set=False)

    epsilon_f = Float(250.0e-6,
                      label="epsilon_f",
                      desc="strain controlling the rate of damage evolution",
                      enter_set=True,
                      auto_set=False)

    c_T = Float(1.0,
                label="c_T",
                desc="ratio of tangential to normal strain contribution",
                enter_set=True,
                auto_set=False)

    zeta_G = Float(1.0,
                   label="zeta_G",
                   desc="anisotropy parameter",
                   enter_set=True,
                   auto_set=False)

    E = tr.Float(34000.0,
                 label="E",
                 desc="Young's Modulus",
                 auto_set=False,
                 input=True)

    nu = tr.Float(0.2,
                  label='nu',
                  desc="Poison ratio",
                  auto_set=False,
                  input=True)

    state_var_shapes = tr.Property(tr.Dict(), depends_on='n_mp')
    '''Dictionary of state variable entries with their array shapes.
    '''
    @cached_property
    def _get_state_var_shapes(self):
        return {'kappa': (self.n_mp, ), 'omega': (self.n_mp, )}

    #-------------------------------------------------------------------------
    # MICROPLANE-Kinematic constraints
    #-------------------------------------------------------------------------

    # get the dyadic product of the microplane normals
    _MPNN = Property(depends_on='n_mp')

    @cached_property
    def _get__MPNN(self):
        # dyadic product of the microplane normals

        MPNN_nij = np.einsum('ni,nj->nij', self._MPN, self._MPN)
        return MPNN_nij

    # get the third order tangential tensor (operator) for each microplane
    _MPTT = Property(depends_on='n_mp')

    @cached_property
    def _get__MPTT(self):
        # Third order tangential tensor for each microplane
        delta = np.identity(3)
        MPTT_nijr = 0.5 * (
            np.einsum('ni,jr -> nijr', self._MPN, delta) +
            np.einsum('nj,ir -> njir', self._MPN, delta) - 2.0 *
            np.einsum('ni,nj,nr -> nijr', self._MPN, self._MPN, self._MPN))
        return MPTT_nijr

    def _get_e_Emna(self, eps_Emab):
        # Projection of apparent strain onto the individual microplanes
        e_ni = np.einsum('nb,Emba->Emna', self._MPN, eps_Emab)
        return e_ni

    def _get_e_N_Emn(self, e_Emna):
        # get the normal strain array for each microplane
        e_N_Emn = np.einsum('Emna, na->Emn', e_Emna, self._MPN)
        return e_N_Emn

    def _get_e_N_arr_2(self, eps_Emab):

        #eps_mtx = self.map_eps_eng_to_mtx(eps_eng)
        return np.einsum('nij,Emij->Emn', self._MPNN, eps_Emab)

    def _get_e_t_vct_arr_2(self, eps_Emab):

        #eps_mtx = self.map_eps_eng_to_mtx(eps_eng)
        MPTT_ijr = self._get__MPTT()
        return np.einsum('nijr,Emij->Emnr', MPTT_ijr, eps_Emab)

    def _get_e_equiv_Emn(self, e_Emna):
        '''
        Returns a list of the microplane equivalent strains
        based on the list of microplane strain vectors
        '''
        # magnitude of the normal strain vector for each microplane
        e_N_Emn = self._get_e_N_Emn(e_Emna)
        # print e_N_Emn[0, -1, :]
        # positive part of the normal strain magnitude for each microplane
        e_N_pos_Emn = (np.abs(e_N_Emn) + e_N_Emn) / 2.0
        # normal strain vector for each microplane
        e_N_Emna = np.einsum('Emn,ni -> Emni', e_N_Emn, self._MPN)
        # tangent strain ratio
        c_T = self.c_T
        # tangential strain vector for each microplane
        e_T_Emna = e_Emna - e_N_Emna
        # squared tangential strain vector for each microplane
        e_TT_Emn = np.einsum('Emni,Emni -> Emn', e_T_Emna, e_T_Emna)
        # print e_TT_Emn[0, -1, :]
        # equivalent strain for each microplane
        e_equiv_Emn = np.sqrt(e_N_pos_Emn * e_N_pos_Emn + c_T * e_TT_Emn)
        return e_equiv_Emn
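
    # In tensor form, the equivalent strain computed above is
    #     eps_eq = sqrt( <eps_N>_+ ** 2 + c_T * |e_T| ** 2 )
    # where <.>_+ is the positive part of the normal strain and e_T is the
    # tangential strain vector on each microplane.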

    def update_state_variables(self, eps_Emab, kappa, omega):

        e_Emna = self._get_e_Emna(eps_Emab)
        eps_eq_Emn = self._get_e_equiv_Emn(e_Emna)
        f_trial_Emn = eps_eq_Emn - self.epsilon_0
        I = np.where(f_trial_Emn > 0)
        kappa[I] = eps_eq_Emn[I]
        omega[I] = self._get_omega(eps_eq_Emn[I])
        return I

    def _get_omega(self, kappa_Emn):
        '''
        Return new value of damage parameter
        @param kappa:
        '''
        omega_Emn = np.zeros_like(kappa_Emn)
        epsilon_0 = self.epsilon_0
        epsilon_f = self.epsilon_f
        I = np.where(kappa_Emn >= epsilon_0)
        omega_Emn[I] = (
            1.0 -
            (epsilon_0 / kappa_Emn[I] * np.exp(-1.0 *
                                               (kappa_Emn[I] - epsilon_0) /
                                               (epsilon_f - epsilon_0))))
        return omega_Emn
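
    # The damage law above is the exponential softening function
    #     omega(kappa) = 1 - (epsilon_0 / kappa)
    #                        * exp(-(kappa - epsilon_0) / (epsilon_f - epsilon_0))
    # for kappa >= epsilon_0; below the onset strain the microplane stays
    # undamaged (omega = 0).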

    def _get_phi_Emab(self, kappa_Emn):
        # Returns the 2nd order damage tensor 'phi_mtx'
        # scalar integrity factor for each microplane
        phi_Emn = 1.0 - self._get_omega(kappa_Emn)
        # integration terms for each microplanes
        phi_Emab = np.einsum('Emn,n,nab->Emab', phi_Emn, self._MPW, self._MPNN)

        return phi_Emab

    #----------------------------------------------------------------
    # Returns the fourth order volumetric identity tensor
    #----------------------------------------------------------------
    def _get_I_vol_abcd(self):

        delta = np.identity(3)
        I_vol_abcd = (1.0 / 3.0) * np.einsum('ab,cd -> abcd', delta, delta)
        return I_vol_abcd

    #----------------------------------------------------------------
    # Returns the fourth order deviatoric identity tensor
    #----------------------------------------------------------------
    def _get_I_dev_abcd(self):

        delta = np.identity(3)
        I_dev_abcd = 0.5 * (np.einsum('ac,bd -> abcd', delta, delta) +
                            np.einsum('ad,bc -> abcd', delta, delta)) \
            - (1.0 / 3.0) * np.einsum('ab,cd -> abcd', delta, delta)

        return I_dev_abcd

    #----------------------------------------------------------------
    # Returns the fourth order tensor P_vol [Wu.2009]
    #----------------------------------------------------------------
    def _get_P_vol_ab(self):

        delta = np.identity(3)
        P_vol_ab = (1.0 / 3.0) * delta
        return P_vol_ab

    #----------------------------------------------------------------
    # Returns the fourth order tensor P_dev [Wu.2009]
    #----------------------------------------------------------------
    def _get_P_dev_nabc(self):

        delta = np.identity(3)
        P_dev_nabc = 0.5 * \
            np.einsum('nd,da,bc -> nabc', self._MPN, delta, delta)
        return P_dev_nabc

    #----------------------------------------------------------------
    # Returns the outer product of P_vol [Wu.2009]
    #----------------------------------------------------------------
    def _get_PP_vol_abcd(self):

        delta = np.identity(3)
        PP_vol_abcd = (1.0 / 9.0) * np.einsum('ab,cd -> abcd', delta, delta)
        return PP_vol_abcd

    #----------------------------------------------------------------
    # Returns the inner product of P_dev
    #----------------------------------------------------------------
    def _get_PP_dev_nabcd(self):
        delta = np.identity(3)
        PP_dev_nabcd = 0.5 * (
            0.5 * (np.einsum('na,nc,bd -> nabcd', self._MPN, self._MPN, delta) +
                   np.einsum('na,nd,bc -> nabcd', self._MPN, self._MPN, delta)) +
            0.5 * (np.einsum('ac,nb,nd -> nabcd',  delta, self._MPN, self._MPN) +
                   np.einsum('ad,nb,nc -> nabcd',  delta, self._MPN, self._MPN))) -\
            (1.0 / 3.0) * (np.einsum('na,nb,cd -> nabcd', self._MPN, self._MPN, delta) +
                           np.einsum('ab,nc,nd -> nabcd', delta, self._MPN, self._MPN)) +\
            (1.0 / 9.0) * np.einsum('ab,cd -> abcd', delta, delta)
        return PP_dev_nabcd

    def _get_I_vol_4(self):
        # The fourth order volumetric identity tensor
        delta = np.identity(3)
        I_vol_ijkl = (1.0 / 3.0) * np.einsum('ij,kl -> ijkl', delta, delta)
        return I_vol_ijkl

    def _get_I_dev_4(self):
        # The fourth order deviatoric identity tensor
        delta = np.identity(3)
        I_dev_ijkl = 0.5 * (np.einsum('ik,jl -> ijkl', delta, delta) +
                            np.einsum('il,jk -> ijkl', delta, delta)) \
            - (1 / 3.0) * np.einsum('ij,kl -> ijkl', delta, delta)

        return I_dev_ijkl

    def _get_P_vol(self):
        delta = np.identity(3)
        P_vol_ij = (1 / 3.0) * delta
        return P_vol_ij

    def _get_P_dev(self):
        delta = np.identity(3)
        P_dev_njkl = 0.5 * \
            np.einsum('ni,ij,kl -> njkl', self._MPN, delta, delta)
        return P_dev_njkl

    def _get_PP_vol_4(self):
        # outer product of P_vol
        delta = np.identity(3)
        PP_vol_ijkl = (1 / 9.) * np.einsum('ij,kl -> ijkl', delta, delta)
        return PP_vol_ijkl

    def _get_PP_dev_4(self):
        # inner product of P_dev
        delta = np.identity(3)
        PP_dev_nijkl = 0.5 * (
            0.5 * (np.einsum('ni,nk,jl -> nijkl', self._MPN, self._MPN, delta) +
                   np.einsum('ni,nl,jk -> nijkl', self._MPN, self._MPN, delta)) +
            0.5 * (np.einsum('ik,nj,nl -> nijkl',  delta, self._MPN, self._MPN) +
                   np.einsum('il,nj,nk -> nijkl',  delta, self._MPN, self._MPN))) -\
            (1 / 3.) * (np.einsum('ni,nj,kl -> nijkl', self._MPN, self._MPN, delta) +
                        np.einsum('ij,nk,nl -> nijkl', delta, self._MPN, self._MPN)) +\
            (1 / 9.) * np.einsum('ij,kl -> ijkl', delta, delta)
        return PP_dev_nijkl

    #--------------------------------------------------------------------------
    # Returns the fourth order secant stiffness tensor (cf. [Wu.2009], Eq.(29))
    #--------------------------------------------------------------------------
    def _get_S_1_Emabcd(self, eps_Emab, kappa, omega):

        K0 = self.E / (1.0 - 2.0 * self.nu)
        G0 = self.E / (1.0 + self.nu)

        phi_Emn = 1.0 - self._get_omega(kappa)
        # print 'phi_Emn', phi_Emn

        PP_vol_abcd = self._get_PP_vol_abcd()
        PP_dev_nabcd = self._get_PP_dev_nabcd()
        I_dev_abcd = self._get_I_dev_abcd()

        #         PP_vol_abcd = self._get_PP_vol_4()
        #         PP_dev_nabcd = self._get_PP_dev_4()
        #         I_dev_abcd = self._get_I_dev_4()

        S_1_Emabcd = K0 * \
            np.einsum('Emn, n, abcd-> Emabcd', phi_Emn, self._MPW, PP_vol_abcd) + \
            G0 * 2.0 * self.zeta_G * np.einsum('Emn, n, nabcd-> Emabcd',
                                               phi_Emn, self._MPW, PP_dev_nabcd) - (1.0 / 3.0) * (
                2.0 * self.zeta_G - 1.0) * G0 * np.einsum('Emn, n, abcd-> Emabcd',
                                                          phi_Emn, self._MPW, I_dev_abcd)

        return S_1_Emabcd

#     #------------------------------------------
#     # scalar damage factor for each microplane
#     #------------------------------------------
#     def _get_d_Em(self, s_Emng, eps_Emab):
#
#         d_Emn = 1.0 - self.get_state_variables(s_Emng, eps_Emab)[0]
#
#         d_Em = (1.0 / 3.0) * np.einsum('Emn,n-> Em',  d_Emn, self._MPW)
#
#         return d_Em
#
#     #------------------------------------------
#     # The 4th order volumetric damage tensor
#     #------------------------------------------
#     def _get_M_vol_abcd(self, sctx, eps_app_eng, sigma_kk):
#
#         d = self._get_Em( s_Emng, eps_Emab)
#         delta = np.identity(2)
#
#         I_4th_abcd = 0.5 * (np.einsum('ac,bd -> ijkl', delta, delta) +
#                             np.einsum('il,jk -> ijkl', delta, delta))
#
#         # print 'M_vol', (1 - d) * I_4th_ijkl
#
#         return (1 - d) * I_4th_ijkl
#
#     #------------------------------------------
#     # The 4th order deviatoric damage tensor
#     #------------------------------------------
#     def _get_M_dev_tns(self, phi_mtx):
#
#         delta = np.identity(3)
#         I_4th_ijkl = 0.5 * (np.einsum('ik,jl -> ijkl', delta, delta) +
#                             np.einsum('il,jk -> ijkl', delta, delta))
#         tr_phi_mtx = np.trace(phi_mtx)
#
#         M_dev_ijkl = self.zeta_G * (0.5 * (np.einsum('ik,jl->ijkl', delta, phi_mtx) +
#                                            np.einsum('il,jk->ijkl', delta, phi_mtx)) +
#                                     0.5 * (np.einsum('ik,jl->ijkl', phi_mtx, delta) +
#                                            np.einsum('il,jk->ijkl', phi_mtx, delta))) \
#             - (2. * self.zeta_G - 1.) * (tr_phi_mtx / 3.) * I_4th_ijkl
#
#         return M_dev_ijkl
#
#     #--------------------------------------------------------------------------
#     # Returns the fourth order secant stiffness tensor (cf. [Wu.2009], Eq.(31))
#     #--------------------------------------------------------------------------
#     def _get_S_2_Emabcd(self, sctx, eps_app_eng, sigma_kk):
#
#         K0 = self.E / (1. - 2. * self.nu)
#         G0 = self.E / (1. + self.nu)
#
#         I_vol_ijkl = self._get_I_vol_4()
#         I_dev_ijkl = self._get_I_dev_4()
#         phi_mtx = self._get_phi_mtx(sctx, eps_app_eng, sigma_kk)
#         M_vol_ijkl = self._get_M_vol_tns(sctx, eps_app_eng, sigma_kk)
#         M_dev_ijkl = self._get_M_dev_tns(phi_mtx)
#
#         S_2_ijkl = K0 * np.einsum('ijmn,mnrs,rskl -> ijkl', I_vol_ijkl, M_vol_ijkl, I_vol_ijkl ) \
#             + G0 * np.einsum('ijmn,mnrs,rskl -> ijkl', I_dev_ijkl, M_dev_ijkl, I_dev_ijkl)\
#
#         return S_2_ijkl
#
#     #--------------------------------------------------------------------------
#     # Returns the fourth order secant stiffness tensor (cf. [Wu.2009], Eq.(34))
#     #--------------------------------------------------------------------------
#     def _get_S_3_Emabcd(self, sctx, eps_app_eng, sigma_kk):
#
#         K0 = self.E / (1. - 2. * self.nu)
#         G0 = self.E / (1. + self.nu)
#
#         I_vol_ijkl = self._get_I_vol_4()
#         I_dev_ijkl = self._get_I_dev_4()
#
#         # The fourth order elastic stiffness tensor
#         S_0_ijkl = K0 * I_vol_ijkl + G0 * I_dev_ijkl
#
#         d_n = self._get_state_variables(sctx, eps_app_eng, sigma_kk)[:, 5]
#
#         PP_vol_4 = self._get_PP_vol_4()
#         PP_dev_4 = self._get_PP_dev_4()
#
#         delta = np.identity(3)
#         I_4th_ijkl = np.einsum('ik,jl -> ijkl', delta, delta)
#
#         D_ijkl = np.einsum('n,n,ijkl->ijkl', d_n, self._MPW, PP_vol_4) + \
#             2 * self.zeta_G * np.einsum('n,n,nijkl->ijkl', d_n, self._MPW, PP_dev_4) - (
#                 1 / 3.) * (2 * self.zeta_G - 1) * np.einsum('n,n,ijkl->ijkl', d_n, self._MPW, I_dev_ijkl)
#
#         phi_ijkl = (I_4th_ijkl - D_ijkl)
#
#         S_ijkl = np.einsum('ijmn,mnkl', phi_ijkl, S_0_ijkl)
#
#         return S_ijkl
#
#-------------------------------------------------------------------------
# Returns the fourth order secant stiffness tensor with the (double orthotropic) assumption
#-------------------------------------------------------------------------

    def _get_S_4_Emabcd(self, eps_Emab, kappa, omega):

        K0 = self.E / (1.0 - 2.0 * self.nu)
        G0 = self.E / (1.0 + self.nu)
        I_vol_abcd = self._get_I_vol_abcd()
        I_dev_abcd = self._get_I_dev_abcd()

        delta = np.identity(3)
        phi_Emab = self._get_phi_Emab(kappa)

        D_Emab = delta - phi_Emab

        d_Em = (1.0 / 3.0) * np.einsum('Emaa -> Em', D_Emab)

        D_bar_Emab = self.zeta_G * \
            (D_Emab - np.einsum('Em, ab -> Emab', d_Em, delta))

        S_4_Emabcd = K0 * I_vol_abcd - K0 * np.einsum('Em,abcd -> Emabcd',
                                                      d_Em, I_vol_abcd) +\
            G0 * I_dev_abcd - G0 * np.einsum('Em,abcd -> Emabcd',
                                             d_Em, I_dev_abcd) +\
            (2.0 / 3.0) * (G0 - K0) * (np.einsum('ij,Emkl -> Emijkl',
                                                 delta, D_bar_Emab) +
                                       np.einsum('Emij,kl -> Emijkl',
                                                 D_bar_Emab, delta)) + 0.5 * (- K0 + 2.0 * G0) *\
            (0.5 * (np.einsum('ik,Emjl -> Emijkl', delta, D_bar_Emab) + np.einsum('Emil,jk -> Emijkl', D_bar_Emab, delta)) +
             0.5 * (np.einsum('Emil,jk -> Emijkl', D_bar_Emab, delta) + np.einsum('ik,Emjl -> Emijkl', delta, D_bar_Emab)))

        return S_4_Emabcd

    #----------------------------------------------------------------------
    # Returns the fourth order secant stiffness tensor (restrctive orthotropic)
    #----------------------------------------------------------------------
    def _get_S_5_Emabcd(self, eps_Emab, kappa, omega):

        K0 = self.E / (1.0 - 2.0 * self.nu)
        G0 = self.E / (1.0 + self.nu)

        delta = np.identity(3)
        phi_Emab = self._get_phi_Emab(kappa)

        # damaged stiffness without simplification
        S_5_Emabcd = (1.0 / 3.0) * (K0 - G0) * 0.5 * ((np.einsum('ij,Emkl -> Emijkl', delta, phi_Emab) +
                                                       np.einsum('Emij,kl -> Emijkl', phi_Emab, delta))) + \
            G0 * 0.5 * ((0.5 * (np.einsum('ik,Emjl -> Emijkl', delta, phi_Emab) + np.einsum('Emil,jk -> Emijkl', phi_Emab, delta)) +
                         0.5 * (np.einsum('Emik,jl -> Emijkl', phi_Emab, delta) + np.einsum('il,Emjk -> Emijkl', delta, phi_Emab))))

        return S_5_Emabcd

    #-------------------------------------------------------------------------
    # Evaluation - get the corrector and predictor
    #-------------------------------------------------------------------------

    def get_corr_pred(self, eps_Emab, tn1, kappa, omega):

        I = self.update_state_variables(eps_Emab, kappa, omega)
        D_Emabcd = self._get_S_4_Emabcd(eps_Emab, kappa, omega)
        sig_Emab = np.einsum('Emabcd,Emcd -> Emab', D_Emabcd, eps_Emab)

        return sig_Emab, D_Emabcd

    #-----------------------------------------------
    # number of microplanes - currently fixed for 3D
    #-----------------------------------------------
    n_mp = Constant(28)

    #-----------------------------------------------
    # get the normal vectors of the microplanes
    #-----------------------------------------------
    _MPN = Property(depends_on='n_mp')

    @cached_property
    def _get__MPN(self):
        return np.array([[.577350259, .577350259, .577350259],
                         [.577350259, .577350259, -.577350259],
                         [.577350259, -.577350259, .577350259],
                         [.577350259, -.577350259, -.577350259],
                         [.935113132, .250562787, .250562787],
                         [.935113132, .250562787, -.250562787],
                         [.935113132, -.250562787, .250562787],
                         [.935113132, -.250562787, -.250562787],
                         [.250562787, .935113132, .250562787],
                         [.250562787, .935113132, -.250562787],
                         [.250562787, -.935113132, .250562787],
                         [.250562787, -.935113132, -.250562787],
                         [.250562787, .250562787, .935113132],
                         [.250562787, .250562787, -.935113132],
                         [.250562787, -.250562787, .935113132],
                         [.250562787, -.250562787, -.935113132],
                         [.186156720, .694746614, .694746614],
                         [.186156720, .694746614, -.694746614],
                         [.186156720, -.694746614, .694746614],
                         [.186156720, -.694746614, -.694746614],
                         [.694746614, .186156720, .694746614],
                         [.694746614, .186156720, -.694746614],
                         [.694746614, -.186156720, .694746614],
                         [.694746614, -.186156720, -.694746614],
                         [.694746614, .694746614, .186156720],
                         [.694746614, .694746614, -.186156720],
                         [.694746614, -.694746614, .186156720],
                         [.694746614, -.694746614, -.186156720]])

    #-------------------------------------
    # get the weights of the microplanes
    #-------------------------------------
    _MPW = Property(depends_on='n_mp')

    @cached_property
    def _get__MPW(self):
        return np.array([
            .0160714276, .0160714276, .0160714276, .0160714276, .0204744730,
            .0204744730, .0204744730, .0204744730, .0204744730, .0204744730,
            .0204744730, .0204744730, .0204744730, .0204744730, .0204744730,
            .0204744730, .0158350505, .0158350505, .0158350505, .0158350505,
            .0158350505, .0158350505, .0158350505, .0158350505, .0158350505,
            .0158350505, .0158350505, .0158350505
        ]) * 6.0
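
A quick sanity check of the microplane scheme above: the weights are scaled so
that the weighted dyads of the normals recover the identity tensor.  A
corrector/predictor call for a single element and integration point is also
shown.  This is a sketch; it assumes the class (and its MATS3DEval base) can
be instantiated with default trait values.

>>> import numpy as np
>>> mat = MATS3DMplDamageODF()
>>> w, n = mat._MPW, mat._MPN
>>> np.allclose(w.sum(), 3.0)
True
>>> np.allclose(np.einsum('n,ni,nj->ij', w, n, n), np.identity(3))
True
>>> eps = np.zeros((1, 1, 3, 3))
>>> eps[..., 0, 0] = 1e-4
>>> kappa = np.zeros((1, 1, mat.n_mp))
>>> omega = np.zeros((1, 1, mat.n_mp))
>>> sig, D = mat.get_corr_pred(eps, 0.0, kappa, omega)
>>> sig.shape, D.shape
((1, 1, 3, 3), (1, 1, 3, 3, 3, 3))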
Example #19
class FlowPeaks1DView(By1DView, AnnotatingView, HistogramView):
    """
    A one-dimensional diagnostic view for :class:`FlowPeaksOp`.  Plots a histogram
    of the channel, then overlays the k-means centroids in blue.

    Attributes
    ----------    

    """

    id = Constant('edu.mit.synbio.cytoflow.view.flowpeaks1dview')
    friendly_id = Constant("1D FlowPeaks Diagnostic Plot")

    channel = Str
    scale = util.ScaleEnum

    def plot(self, experiment, **kwargs):
        """
        Plot the channel histogram with the k-means centroids overlaid.
        
        Parameters
        ----------
        
        """

        if experiment is None:
            raise util.CytoflowViewError('experiment',
                                         "No experiment specified")

        view, trait_name = self._strip_trait(self.op.name)

        if self.channel in self.op._scale:
            scale = self.op._scale[self.channel]
        else:
            scale = util.scale_factory(self.scale,
                                       experiment,
                                       channel=self.channel)

        super(FlowPeaks1DView, view).plot(experiment,
                                          annotation_facet=self.op.name,
                                          annotation_trait=trait_name,
                                          annotations=self.op._kmeans,
                                          scale=scale,
                                          **kwargs)

    def _annotation_plot(self, axes, annotation, annotation_facet,
                         annotation_value, annotation_color, **kwargs):

        kwargs.setdefault('orientation', 'vertical')

        cidx = self.op.channels.index(self.channel)
        line_fn = plt.axhline if kwargs['orientation'] == 'horizontal' \
                  else plt.axvline

        for k in range(self.op.num_clusters):
            c = self.op._scale[self.channel].inverse(
                annotation.cluster_centers_[k][cidx])
            line_fn(c, linewidth=3, color='blue')
Example #20
class HlogScale(ScaleMixin):
    """
    A scale that transforms the data using the `hyperlog` function.
    
    This scaling method implements a "linear-like" region around 0, and a
    "log-like" region for large values, with a smooth transition between
    them.
    
    The transformation has one parameter, `b`, which specifies the location of
    the transition from linear to log-like.  The default is appropriate for
    18-bit scales; other scales may need a different value.
    
    Attributes
    ----------
    b : Float (default = 200)
        the location of the transition from linear to log-like.
    
    References
    ----------
    [1] Bagwell CB.  "Hyperlog - a flexible log-like transform for negative,
        zero, and positive valued data."  Cytometry A. 2005 Mar;64(1):34-42.
        PMID: 15700280.
        http://onlinelibrary.wiley.com/doi/10.1002/cyto.a.20114/abstract
    """

    id = Constant("edu.mit.synbio.cytoflow.utility.hlog")
    name = "hlog"

    experiment = Instance("cytoflow.Experiment")

    # what data do we use to compute scale parameters?  set one.
    channel = Str
    condition = Str
    statistic = Tuple(Str, Str)
    error_statistic = Tuple(Str, Str)
    data = Array

    range = Property(Float)
    b = Float(200, desc="location of the log transition")

    mpl_params = Property(Dict, depends_on="[b, range, scale_min, scale_max]")

    def __call__(self, data):
        """
        Transforms `data` using this scale.
        
        Careful!  May return `NaN` if the scale domain doesn't match the data 
        (i.e., applying a log10 scale to negative numbers).
        """

        f = _make_hlog_numeric(self.b, 1.0, np.log10(self.range))

        if isinstance(data, pd.Series):
            return data.apply(f)
        elif isinstance(data, np.ndarray):
            return f(data)
        elif isinstance(data, (int, float)):
            # numpy returns a 0-dim array here; convert it to a plain float
            return float(f(data))
        else:
            try:
                return list(map(f, data))
            except TypeError as e:
                raise CytoflowError(
                    "Unknown data type in HlogScale.__call__") from e

    def inverse(self, data):
        """
        Transforms 'data' using the inverse of this scale.
        """

        f_inv = lambda y, b=self.b, d=np.log10(self.range): hlog_inv(
            y, b, 1.0, d)

        if isinstance(data, pd.Series):
            return data.apply(f_inv)
        elif isinstance(data, np.ndarray):
            inverse = np.vectorize(f_inv)
            return inverse(data)
        elif isinstance(data, (int, float)):
            return f_inv(data)
        else:
            try:
                return list(map(f_inv, data))
            except TypeError as e:
                raise CytoflowError(
                    "Unknown data type in HlogScale.inverse") from e

    def clip(self, data):
        return data

    def norm(self):
        if self.channel:
            vmin = self.experiment[self.channel].min()
            vmax = self.experiment[self.channel].max()
        elif self.condition:
            vmin = self.experiment[self.condition].min()
            vmax = self.experiment[self.condition].max()
        elif self.statistic:
            stat = self.experiment.statistics[self.statistic]
            try:
                vmin = min([min(x) for x in stat])
                vmax = max([max(x) for x in stat])
            except (TypeError, IndexError):
                vmin = stat.min()
                vmax = stat.max()
        elif self.data is not None:
            vmin = self.data.min()
            vmax = self.data.max()
        else:
            raise CytoflowError("Must set one of 'channel', 'condition', "
                                "'statistic' or 'data'.")

        class HlogNormalize(matplotlib.colors.Normalize):
            def __init__(self, vmin, vmax, scale):
                self._scale = scale
                matplotlib.colors.Normalize.__init__(self, vmin, vmax)

            def __call__(self, value, clip=None):
                # as implemented here, hlog already transforms onto a (0, 1)
                # scale
                scaled_value = self._scale(value)
                return np.ma.masked_array(scaled_value)

        return HlogNormalize(vmin, vmax, self)

    def _get_range(self):
        if self.experiment:
            if self.channel and self.channel in self.experiment.channels:
                if "range" in self.experiment.metadata[self.channel]:
                    return self.experiment.metadata[self.channel]["range"]
                else:
                    return self.experiment.data[self.channel].max()
            elif self.condition and self.condition in self.experiment.conditions:
                return self.experiment.data[self.condition].max()
            elif self.statistic and self.statistic in self.experiment.statistics:
                return self.experiment.statistics[self.statistic].max()
            elif self.data.size > 0:
                return self.data.max()
            else:
                return Undefined
        else:
            return Undefined

    @cached_property
    def _get_mpl_params(self):
        return {"b": self.b, "range": self.range}
Example #21
class ColorTranslationDiagnostic(HasStrictTraits):
    """
    Attributes
    ----------
    name : Str
        The instance name (for serialization, UI etc.)
    
    op : Instance(ColorTranslationOp)
        The op whose parameters we're viewing
        
    subset : str
        A Python expression specifying a subset of the events in the control 
        FCS files to plot
    """

    # traits
    id = Constant("edu.mit.synbio.cytoflow.view.colortranslationdiagnostic")
    friendly_id = Constant("Color Translation Diagnostic")

    subset = Str

    # TODO - why can't I use ColorTranslationOp here?
    op = Instance(IOperation)

    def plot(self, experiment, **kwargs):
        """
        Plot the translation diagnostic plots
        
        Parameters
        ----------
        experiment : Experiment
            
        """

        if experiment is None:
            raise util.CytoflowViewError('experiment',
                                         "No experiment specified")

        if not self.op.controls and not self.op.controls_frames:
            raise util.CytoflowViewError('op', "No controls specified")

        if not self.op._trans_fn:
            raise util.CytoflowViewError(
                'op', "Transfer functions aren't set. "
                "Did you forget to call estimate()?")

        tubes = {}

        if self.op.controls:
            controls = self.op.controls
        else:
            controls = self.op.controls_frames

        translation = {x[0]: x[1] for x in list(controls.keys())}

        plt.figure()
        num_plots = len(list(controls.keys()))
        plt_idx = 0

        for from_channel, to_channel in translation.items():
            #             from_range = experiment.metadata[from_channel]['range']
            #             to_range = experiment.metadata[to_channel]['range']
            data = self.op._sample[(from_channel, to_channel)]
            from_min = data[from_channel].quantile(0.01)
            from_max = data[from_channel].quantile(0.99)
            to_min = data[to_channel].quantile(0.01)
            to_max = data[to_channel].quantile(0.99)

            if self.op.mixture_model:
                plt.subplot(num_plots, 2, plt_idx * 2 + 2)
                plt.xscale('log', nonpositive='mask')
                hist_bins = np.logspace(1,
                                        math.log(data[from_channel].max(), 2),
                                        num=128,
                                        base=2)
                _ = plt.hist(data[from_channel],
                             bins=hist_bins,
                             histtype='stepfilled',
                             linewidth=0,
                             antialiased=True)
                plt.xlabel(from_channel)

                plt.axvline(self.op._means[(from_channel, to_channel)][0],
                            color='r')
                plt.axvline(self.op._means[(from_channel, to_channel)][1],
                            color='r')

            num_cols = 2 if self.op.mixture_model else 1
            plt.subplot(num_plots, num_cols, plt_idx * num_cols + 1)
            plt.xscale('log', nonpositive='mask')
            plt.yscale('log', nonpositive='mask')
            plt.xlabel(from_channel)
            plt.ylabel(to_channel)
            plt.xlim(from_min, from_max)
            plt.ylim(to_min, to_max)
            # plt.ylim(to_max, to_min)

            # kwargs.setdefault('alpha', 0.2)
            kwargs.setdefault('s', 50)
            kwargs.setdefault('marker', 'o')

            plt.scatter(data[from_channel], data[to_channel], **kwargs)

            xs = np.logspace(1,
                             math.log(data[from_channel].max(), 2),
                             num=256,
                             base=2)
            trans_fn = self.op._trans_fn[(from_channel, to_channel)]
            # plt.plot(xs, trans_fn(xs), "--g")

            # y = data[to_channel]
            # yhat = trans_fn(data[from_channel])

            # rsq = r2_score(y, yhat)

            # ax = plt.gca()
            # plt.text(0.8,0.2,r'$R^2 = %.2f$' % rsq,ha='center',va='center',transform=ax.transAxes)
            plt.plot(xs, trans_fn(xs), "--r")

            plt_idx = plt_idx + 1
Example #22
class ViolinPlotView(Base1DView):
    """Plots a violin plot -- a facetted set of kernel density estimates.
    
    Attributes
    ----------

    variable : Str
        the main variable by which we're faceting
    
        
    Examples
    --------
    
    Make a little data set.
    
    .. plot::
        :context: close-figs
            
        >>> import cytoflow as flow
        >>> import_op = flow.ImportOp()
        >>> import_op.tubes = [flow.Tube(file = "Plate01/RFP_Well_A3.fcs",
        ...                              conditions = {'Dox' : 10.0}),
        ...                    flow.Tube(file = "Plate01/CFP_Well_A4.fcs",
        ...                              conditions = {'Dox' : 1.0})]
        >>> import_op.conditions = {'Dox' : 'float'}
        >>> ex = import_op.apply()
        
    Plot a violin plot
    
    .. plot::
        :context: close-figs
    
        >>> flow.ViolinPlotView(channel = 'Y2-A',
        ...                     scale = 'log',
        ...                     variable = 'Dox').plot(ex)
    """
    
    # traits   
    id = Constant("edu.mit.synbio.cytoflow.view.violin")
    friendly_id = Constant("Violin Plot")

    variable = Str
    
    def plot(self, experiment, **kwargs):
        """
        Plot a violin plot of a variable
        
        Parameters
        ----------
        
        orient : "v" | "h", optional
            Orientation of the plot (vertical or horizontal). 
        
        bw : {'scott', 'silverman', float}, optional
            Either the name of a reference rule or the scale factor to use when
            computing the kernel bandwidth. The actual kernel size will be
            determined by multiplying the scale factor by the standard deviation of
            the data within each bin.

        scale : {"area", "count", "width"}, optional
            The method used to scale the width of each violin. If ``area``, each
            violin will have the same area. If ``count``, the width of the violins
            will be scaled by the number of observations in that bin. If ``width``,
            each violin will have the same width.
            
        scale_hue : bool, optional
            When nesting violins using a ``hue`` variable, this parameter
            determines whether the scaling is computed within each level of the
            major grouping variable (``scale_hue=True``) or across all the violins
            on the plot (``scale_hue=False``).
            
        gridsize : int, optional
            Number of points in the discrete grid used to compute the kernel
            density estimate.

        inner : {{"box", "quartile", "point", "stick", None}}, optional
            Representation of the datapoints in the violin interior. If ``box``,
            draw a miniature boxplot. If ``quartiles``, draw the quartiles of the
            distribution.  If ``point`` or ``stick``, show each underlying
            datapoint. Using ``None`` will draw unadorned violins.
            
        split : bool, optional
            When using hue nesting with a variable that takes two levels, setting
            ``split`` to True will draw half of a violin for each level. This can
            make it easier to directly compare the distributions.

        """
        
        if experiment is None:
            raise util.CytoflowViewError('experiment',
                                         "No experiment specified")
        
        if not self.variable:
            raise util.CytoflowViewError('variable',
                                         "Variable not specified")
        
        facets = [x for x in [self.xfacet, self.yfacet, self.huefacet, self.variable] if x]
        if len(facets) != len(set(facets)):
            raise util.CytoflowViewError("Can't reuse facets")
        
        super().plot(experiment, **kwargs)
        
    def _grid_plot(self, experiment, grid, xlim, ylim, xscale, yscale, **kwargs):

        kwargs.setdefault('orient', 'v')

        # seaborn's violinplot already takes a 'scale' kwarg, so pass the
        # cytoflow scale under a different name
        kwargs['data_scale'] = xscale
                
        # set the scale for each set of axes; can't just call plt.xscale() 
        for ax in grid.axes.flatten():
            if kwargs['orient'] == 'h':
                ax.set_xscale(xscale.name, **xscale.mpl_params)  
            else:
                ax.set_yscale(xscale.name, **xscale.mpl_params)  
            
        # seaborn expects its (x, y) arguments in an order that depends on
        # the orientation
        if kwargs['orient'] == 'h':
            violin_args = [self.channel, self.variable]
        else:
            violin_args = [self.variable, self.channel]
            
        if self.huefacet:
            violin_args.append(self.huefacet)
            
        grid.map(_violinplot,   
                 *violin_args,      
                 order = np.sort(experiment[self.variable].unique()),
                 hue_order = (np.sort(experiment[self.huefacet].unique()) if self.huefacet else None),
                 **kwargs)
        
        if kwargs['orient'] == 'h':
            return {"yscale" : None}
        else:
            return {"xscale" : None}
Example #23
class Target(HasTraits):

    # externally-specified parameters ...
    name = Constant(value=DEVICE, desc='target machine name on local network')
    signature = Instance(
        Signature,
        desc='Target color signature for identification by Wise Ball')
    monitor = Instance(Monitor, desc='Target screen')
    receiver = Instance(Receiver, desc='receiver of Wise Ball directives')
    directive = Directive
    # internally-managed parameters ...
    mouse = Instance(Mouse, factory=Mouse, args=())
    log = Instance(logging.Logger,
                   factory=logging.getLogger,
                   args=(name.default_value, ))

    @classmethod
    def create(cls, settings):
        # the [Section] name suffix (after '<:>') is assumed to be the
        # machine name on the local network
        section = 'Target <:> {}'.format(DEVICE)
        assert settings.has_section(section), \
            '{} Section not found in configuration file'.format(section)
        port = settings.getint(section, 'port')
        color = settings.get(section, 'color')
        # creating an `image` instance before the Tkinter.Tk instance exists
        # raises "RuntimeError: Too early to create image"
        signature = Signature(color=color)
        monitor = Monitor()
        receiver = Receiver(port=port)
        return cls(signature=signature, monitor=monitor, receiver=receiver)

    def start(self):
        self.receiver.start()
        self.signature.show()

    def stop(self):
        self.receiver.stop()
        self.signature.remove()

    def execute(self):
        try:
            self.receiver.receive()
        except NoTransmission:
            self.signature.show()
        else:
            self.directive = self.receiver.transmission
            if self.mouse.is_relevant(self.directive):
                # never display the signature while mouse directives are
                # being received
                self.signature.remove()
                self.mouse.process(self.directive)
            else:
                self.signature.process(self.directive)

    def __str__(self):
        return '{}({}, {})'.format(self.__class__.__name__, self.name,
                                   str(self.signature))
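
``Target.create()`` expects a configparser-style settings object containing a per-machine section. A minimal sketch of a matching configuration file and boot sequence; the machine name, file name, and values are hypothetical, and the section suffix after '<:>' must equal DEVICE:

    # settings.ini (hypothetical)
    #
    #   [Target <:> desk-01]
    #   port = 5005
    #   color = green

    import configparser

    settings = configparser.ConfigParser()
    settings.read('settings.ini')
    target = Target.create(settings)
    target.start()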
Example #24
class ArrayDataSource(AbstractDataSource):
    """ A data source representing a single, continuous array of numerical data.

    This class does not listen to the array for value changes; if you need that
    behavior, create a subclass that hooks up the appropriate listeners.
    """

    #------------------------------------------------------------------------
    # AbstractDataSource traits
    #------------------------------------------------------------------------

    # The dimensionality of the indices into this data source (overrides
    # AbstractDataSource).
    index_dimension = Constant('scalar')

    # The dimensionality of the value at each index point (overrides
    # AbstractDataSource).
    value_dimension = Constant('scalar')

    # The sort order of the data.
    # This is a specialized optimization for 1-D arrays, but it's an important
    # one that's used everywhere.
    sort_order = SortOrderTrait

    #------------------------------------------------------------------------
    # Private traits
    #------------------------------------------------------------------------

    # The data array itself.
    _data = NumericalSequenceTrait

    # Cached values of min and max as long as **_data** doesn't change.
    _cached_bounds = Tuple

    # Not necessary, since this is not a filter, but provided for convenience.
    _cached_mask = Any

    # The index of the (first) minimum value in self._data
    # FIXME: This is an Any instead of an Int trait because of how Traits
    # typechecks numpy.int64 on 64-bit Windows systems.
    _min_index = Any

    # The index of the (first) maximum value in self._data
    # FIXME: This is an Any instead of an Int trait because of how Traits
    # typechecks numpy.int64 on 64-bit Windows systems.
    _max_index = Any

    #------------------------------------------------------------------------
    # Public methods
    #------------------------------------------------------------------------

    def __init__(self, data=array([]), sort_order="none", **kw):
        AbstractDataSource.__init__(self, **kw)
        self.set_data(data, sort_order)
        return

    def set_data(self, newdata, sort_order=None):
        """ Sets the data, and optionally the sort order, for this data source.

        Parameters
        ----------
        newdata : array
            The data to use.
        sort_order : SortOrderTrait
            The sort order of the data
        """
        self._data = newdata
        if sort_order is not None:
            self.sort_order = sort_order
        self._compute_bounds()
        self.data_changed = True
        return

    def set_mask(self, mask):
        """ Sets the mask for this data source.
        """
        self._cached_mask = mask
        self.data_changed = True
        return

    def remove_mask(self):
        """ Removes the mask on this data source.
        """
        self._cached_mask = None
        self.data_changed = True
        return

    #------------------------------------------------------------------------
    # AbstractDataSource interface
    #------------------------------------------------------------------------

    def get_data(self):
        """ Returns the data for this data source, or 0.0 if it has no data.

        Implements AbstractDataSource.
        """
        if self._data is not None:
            return self._data
        else:
            return 0.0

    def get_data_mask(self):
        """get_data_mask() -> (data_array, mask_array)

        Implements AbstractDataSource.
        """
        if self._cached_mask is None:
            return self._data, ones(len(self._data), dtype=bool)
        else:
            return self._data, self._cached_mask

    def is_masked(self):
        """is_masked() -> bool

        Implements AbstractDataSource.
        """
        if self._cached_mask is not None:
            return True
        else:
            return False

    def get_size(self):
        """get_size() -> int

        Implements AbstractDataSource.
        """
        if self._data is not None:
            return len(self._data)
        else:
            return 0

    def get_bounds(self):
        """ Returns the minimum and maximum values of the data source's data.

        Implements AbstractDataSource.
        """
        if self._cached_bounds is None or self._cached_bounds == () or \
               self._cached_bounds == 0.0:
            self._compute_bounds()
        return self._cached_bounds

    def reverse_map(self, pt, index=0, outside_returns_none=True):
        """Returns the index of *pt* in the data source.

        Parameters
        ----------
        pt : scalar value
            value to find
        index
            ignored for data series with 1-D indices
        outside_returns_none : Boolean
            Whether the method returns None if *pt* is outside the range of
            the data source; if False, the method returns the value of the
            bound that *pt* is outside of.
        """
        if self.sort_order == "none":
            raise NotImplementedError

        # index is ignored for dataseries with 1-dimensional indices
        minval, maxval = self._cached_bounds
        if (pt < minval):
            if outside_returns_none:
                return None
            else:
                return self._min_index
        elif (pt > maxval):
            if outside_returns_none:
                return None
            else:
                return self._max_index
        else:
            return reverse_map_1d(self._data, pt, self.sort_order)

    #------------------------------------------------------------------------
    # Private methods
    #------------------------------------------------------------------------

    def _compute_bounds(self, data=None):
        """ Computes the minimum and maximum values of self._data.

        If a data array is passed in, then that is used instead of self._data.
        This behavior is useful for subclasses.
        """
        # TODO: as an optimization, perhaps create and cache a sorted
        #       version of the dataset?

        if data is None:
            # Several sources weren't setting the _data attribute, so we
            # go through the interface.  This seems like the correct thing
            # to do anyway... right?
            data = self.get_data()

        data_len = 0
        try:
            data_len = len(data)
        except TypeError:
            pass
        if data_len == 0:
            self._min_index = 0
            self._max_index = 0
            self._cached_bounds = (0.0, 0.0)
        elif data_len == 1:
            self._min_index = 0
            self._max_index = 0
            self._cached_bounds = (data[0], data[0])
        else:
            if self.sort_order == "ascending":
                self._min_index = 0
                self._max_index = -1
            elif self.sort_order == "descending":
                self._min_index = -1
                self._max_index = 0
            else:
                # ignore NaN values.  This is probably a little slower,
                # but also much safer.

                # data might be an array of strings or objects that
                # can't have argmin calculated on them.
                try:
                    # the data may be in a subclass of numpy.array, viewing
                    # the data as a ndarray will remove side effects of
                    # the subclasses, such as different operator behaviors
                    self._min_index = bounded_nanargmin(data.view(ndarray))
                    self._max_index = bounded_nanargmax(data.view(ndarray))
                except (TypeError, IndexError, NotImplementedError):
                    # For strings and objects, we punt...  These show up in
                    # label-ish data sources.
                    self._cached_bounds = (0.0, 0.0)
                    return

            self._cached_bounds = (data[self._min_index],
                                   data[self._max_index])
        return

    #------------------------------------------------------------------------
    # Event handlers
    #------------------------------------------------------------------------

    def _metadata_changed(self, event):
        self.metadata_changed = True

    def _metadata_items_changed(self, event):
        self.metadata_changed = True

    #------------------------------------------------------------------------
    # Persistence-related methods
    #------------------------------------------------------------------------

    def __getstate__(self):
        state = self.__dict__.copy()
        if not self.persist_data:
            state.pop("_data", None)
            state.pop("_cached_mask", None)
            state.pop("_cached_bounds", None)
            state.pop("_min_index", None)
            state.pop("_max_index", None)
        return state

    def _post_load(self):
        super(ArrayDataSource, self)._post_load()
        self._cached_bounds = ()
        self._cached_mask = None
        return
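
A minimal usage sketch (the values are arbitrary). With an ascending sort order, ``reverse_map`` can look up a value's index; points outside the bounds return None by default:

    from numpy import array

    ds = ArrayDataSource(array([1.0, 2.0, 4.0, 8.0]), sort_order="ascending")
    ds.get_bounds()         # -> (1.0, 8.0)
    ds.reverse_map(4.0)     # -> index of 4.0 in the data
    ds.reverse_map(100.0)   # -> None, since 100.0 is outside the bounds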
Example #25
class SectionHeading(SettingBase):
  value = Constant('')

  def __init__(self, name):
    self.name = name
Example #26
class FrameStatisticOp(HasStrictTraits):
    """
    Apply a function to subsets of a data set, and add it as a statistic
    to the experiment.
    
    The :meth:`apply` function groups the data by the variables in :attr:`by`, 
    then applies the :attr:`function` callable to each :class:`pandas.DataFrame` 
    subset.  The callable should take a :class:`DataFrame` as its only parameter.  
    The return type is arbitrary, but to be used with the rest of 
    :class:`cytoflow` it should probably be a numeric type or an iterable of 
    numeric types.
    
    Attributes
    ----------
    name : Str
        The operation name.  Becomes the first element in the
        :attr:`Experiment.statistics` key tuple.
        
    function : Callable
        The function used to compute the statistic.  Must take a 
        :class:`pandas.DataFrame` as its only argument.  The return type is 
        arbitrary, but to be used with the rest of :class:`cytoflow` it should 
        probably be a numeric type or an iterable of numeric types.  If 
        :attr:`statistic_name` is unset, the name of the function becomes the 
        second in element in the :attr:`Experiment.statistics` key tuple.
        
    statistic_name : Str
        The name of the function; if present, becomes the second element in
        the :attr:`Experiment.statistics` key tuple.  Particularly useful if 
        :attr:`function` is a lambda.
        
    by : List(Str)
        A list of metadata attributes to aggregate the data before applying the
        function.  For example, if the experiment has two pieces of metadata,
        ``Time`` and ``Dox``, setting ``by = ["Time", "Dox"]`` will apply 
        :attr:`function` separately to each subset of the data with a unique 
        combination of ``Time`` and ``Dox``.
        
    subset : Str
        A Python expression sent to Experiment.query() to subset the data before
        computing the statistic.
        
    fill : Any (default = 0)
        The value to use in the statistic if a slice of the data is empty.
   
    Examples
    --------
    
    >>> stats_op = FrameStatisticOp(name = "ByDox",
    ...                             function = lambda x: np.mean(x["FITC-A"]),
    ...                             statistic_name = "Mean",
    ...                             by = ["Dox"])
    >>> ex2 = stats_op.apply(ex)
    """

    id = Constant('edu.mit.synbio.cytoflow.operations.statistics')
    friendly_id = Constant("Statistics")

    name = CStr
    function = Callable
    statistic_name = Str
    by = List(Str)
    subset = Str
    fill = Any(0)

    def apply(self, experiment):
        if experiment is None:
            raise util.CytoflowOpError('experiment', "No experiment specified")

        if not self.name:
            raise util.CytoflowOpError('name', "Must specify a name")

        if self.name != util.sanitize_identifier(self.name):
            raise util.CytoflowOpError(
                'name',
                "Name can only contain letters, numbers and underscores.")

        if not self.function:
            raise util.CytoflowOpError('function', "Must specify a function")

        if not self.by:
            raise util.CytoflowOpError(
                'by', "Must specify some grouping conditions "
                "in 'by'")

        stat_name = (self.name, self.statistic_name) \
                     if self.statistic_name \
                     else (self.name, self.function.__name__)

        if stat_name in experiment.statistics:
            raise util.CytoflowOpError(
                'name', "{} is already in the experiment's statistics".format(
                    stat_name))

        new_experiment = experiment.clone()

        if self.subset:
            try:
                experiment = experiment.query(self.subset)
            except Exception as e:
                raise util.CytoflowOpError(
                    'subset', "Subset string '{0}' isn't valid".format(
                        self.subset)) from e

            if len(experiment) == 0:
                raise util.CytoflowOpError(
                    'subset', "Subset string '{0}' returned no events".format(
                        self.subset))

        for b in self.by:
            if b not in experiment.conditions:
                raise util.CytoflowOpError(
                    'by', "Aggregation metadata {} not found; "
                    "must be one of {}".format(b, experiment.conditions))
            unique = experiment.data[b].unique()

            if len(unique) == 1:
                warn("Only one category for {}".format(b),
                     util.CytoflowOpWarning)

        groupby = experiment.data.groupby(self.by)

        for group, data_subset in groupby:
            if len(data_subset) == 0:
                warn("Group {} had no data".format(group),
                     util.CytoflowOpWarning)

        # this shouldn't be necessary, but see pandas bug #38053
        if len(self.by) == 1:
            idx = pd.Index(experiment[self.by[0]].unique(), name=self.by[0])
        else:
            idx = pd.MultiIndex.from_product(
                [experiment[x].unique() for x in self.by], names=self.by)

        stat = pd.Series(data=self.fill,
                         index=idx,
                         name="{} : {}".format(stat_name[0], stat_name[1]),
                         dtype=np.dtype(object)).sort_index()

        for group, data_subset in groupby:
            if len(data_subset) == 0:
                continue

            try:
                v = self.function(data_subset)

                stat.at[group] = v

            except Exception as e:
                raise util.CytoflowOpError(
                    'function',
                    "Your function threw an error in group {}".format(
                        group)) from e

            # check for, and warn about, NaNs.
            if pd.Series(stat.loc[group]).isna().any():
                warn("Category {} returned {}".format(group, stat.loc[group]),
                     util.CytoflowOpWarning)

        # try to convert to numeric, but leave non-numeric values as-is
        stat = pd.to_numeric(stat, errors='ignore')

        new_experiment.history.append(
            self.clone_traits(transient=lambda t: True))
        new_experiment.statistics[stat_name] = stat

        return new_experiment
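
A sketch combining ``subset`` and ``fill``: the subset expression here is hypothetical, and any group left empty after subsetting is reported as the ``fill`` value:

    >>> count_op = FrameStatisticOp(name = "Count",
    ...                             function = len,
    ...                             statistic_name = "Count",
    ...                             by = ["Dox"],
    ...                             subset = "Dox > 1.0",
    ...                             fill = 0)
    >>> ex2 = count_op.apply(ex)
    >>> ex2.statistics[("Count", "Count")]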
Example #27
class RatioOp(HasStrictTraits):
    """
    Create a new "channel" from the ratio of two other channels.
    
    Attributes
    ----------
    name : Str
        The operation name.  Also becomes the name of the new channel.
        
    numerator : Str
        The channel that is the numerator of the ratio.
        
    denominator : Str
        The channel that is the denominator of the ratio.

    Examples
    --------
    >>> ratio_op = flow.RatioOp()
    >>> ratio_op.name = "Ratio"
    >>> ratio_op.numerator = "FITC-A"
    >>> ratio_op.denominator = "PE-A"  # any other channel in the experiment
    >>> ex5 = ratio_op.apply(ex4)
    """

    # traits
    id = Constant('edu.mit.synbio.cytoflow.operations.ratio')
    friendly_id = Constant("Ratio")

    name = Str
    numerator = Str
    denominator = Str

    def apply(self, experiment):
        """Applies the ratio operation to an experiment
        
        Parameters
        ----------
        experiment : Experiment
            the old experiment to which this op is applied
            
        Returns
        -------
        Experiment
            a new experiment with the new ratio channel
            
            The new channel also has the following new metadata:

            - **numerator** : Str
                What was the numerator channel for the new one?
        
            - **denominator** : Str
                What was the denominator channel for the new one?
    
        """

        if experiment is None:
            raise util.CytoflowOpError('experiment', "No experiment specified")

        if self.numerator not in experiment.channels:
            raise util.CytoflowOpError(
                'numerator',
                "Channel {0} not in the experiment".format(self.numerator))

        if self.denominator not in experiment.channels:
            raise util.CytoflowOpError(
                'denominator',
                "Channel {0} not in the experiment".format(self.denominator))

        if self.name != util.sanitize_identifier(self.name):
            raise util.CytoflowOpError(
                'name',
                "New channel {0} must be a valid Python identifier".format(
                    self.name))

        if self.name in experiment.channels:
            raise util.CytoflowOpError(
                'name', "New channel {0} is already in the experiment".format(
                    self.name))

        new_experiment = experiment.clone()
        new_experiment.add_channel(
            self.name,
            experiment[self.numerator] / experiment[self.denominator])
        new_experiment.data.replace([np.inf, -np.inf], np.nan, inplace=True)
        new_experiment.data.dropna(inplace=True)
        new_experiment.history.append(
            self.clone_traits(transient=lambda t: True))
        new_experiment.metadata[self.name]['numerator'] = self.numerator
        new_experiment.metadata[self.name]['denominator'] = self.denominator
        return new_experiment
Example #28
class TestClass(HasTraits):
    c_atr = Constant(5)
Example #29
class LinearScale(ScaleMixin):
    """
    A scale that doesn't transform the data at all.
    """

    id = Constant("edu.mit.synbio.cytoflow.utility.linear_scale")
    name = "linear"

    experiment = Instance("cytoflow.Experiment")

    # none of these are actually used
    channel = Str
    condition = Str
    statistic = Tuple(Str, Str)
    error_statistic = Tuple(Str, Str)
    data = Array

    def __call__(self, data):
        return data

    def inverse(self, data):
        return data

    def clip(self, data):
        return data

    def norm(self, vmin=None, vmax=None):
        if vmin is not None and vmax is not None:
            pass
        elif self.channel:
            vmin = self.experiment[self.channel].min()
            vmax = self.experiment[self.channel].max()
        elif self.condition:
            vmin = self.experiment[self.condition].min()
            vmax = self.experiment[self.condition].max()
        elif self.statistic in self.experiment.statistics:
            stat = self.experiment.statistics[self.statistic]
            try:
                vmin = min([min(x) for x in stat])
                vmax = max([max(x) for x in stat])
            except (TypeError, IndexError):
                vmin = stat.min()
                vmax = stat.max()

            if self.error_statistic in self.experiment.statistics:
                err_stat = self.experiment.statistics[self.error_statistic]
                try:
                    vmin = min([min(x) for x in err_stat])
                    vmax = max([max(x) for x in err_stat])
                except (TypeError, IndexError):
                    vmin = vmin - err_stat.min()
                    vmax = vmax + err_stat.max()
        elif self.data.size > 0:
            vmin = self.data.min()
            vmax = self.data.max()
        else:
            raise CytoflowError("Must set one of 'channel', 'condition' "
                                "or 'statistic'.")

        return matplotlib.colors.Normalize(vmin=vmin, vmax=vmax)

    def get_mpl_params(self, ax):
        return dict()
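
Since ``LinearScale`` is the identity transform, ``__call__``, ``inverse``, and ``clip`` all return their argument unchanged -- a quick sketch:

    >>> scale = LinearScale()
    >>> scale(4.2)
    4.2
    >>> scale.inverse(scale(4.2))
    4.2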
Example #30
class BeadCalibrationDiagnostic(HasStrictTraits):
    """
    A diagnostic view for `BeadCalibrationOp`.
        
    Plots the smoothed histogram of the bead data; the peak locations;
    a scatter plot of the raw bead fluorescence values vs the calibrated unit 
    values; and a line plot of the model that was computed.  Make sure that the
    relationship is linear; if it's not, it likely isn't a good calibration!
    
    Attributes
    ----------
    op : Instance(BeadCalibrationOp)
        The operation instance whose parameters we're plotting.  Set 
        automatically if you created the instance using 
        :meth:`BeadCalibrationOp.default_view`.

    """
    
    # traits   
    id = Constant("edu.mit.synbio.cytoflow.view.beadcalibrationdiagnosticview")
    friendly_id = Constant("Bead Calibration Diagnostic")
        
    op = Instance(BeadCalibrationOp)
    
    def plot(self, experiment):
        """
        Plots the diagnostic view.
        
        Parameters
        ----------
        experiment : Experiment
            The experiment used to create the diagnostic plot.
        
        """

        if experiment is None:
            raise util.CytoflowViewError('experiment', "No experiment specified")

        channels = list(self.op.units.keys())

        if not channels:
            raise util.CytoflowViewError(None, "No channels to plot")

        if set(channels) != set(self.op._histograms.keys()):
            raise util.CytoflowViewError(None, "You must estimate the parameters "
                                               "before plotting")

        plt.figure()
        
        for idx, channel in enumerate(channels):            
            _, hist_bins, hist_smooth = self.op._histograms[channel]
                
            plt.subplot(len(channels), 2, 2 * idx + 1)
            plt.xscale('log')
            plt.xlabel(channel)
            plt.plot(hist_bins[1:], hist_smooth)
            
            plt.axvline(self.op.bead_brightness_threshold, color = 'blue', linestyle = '--' )
            if self.op.bead_brightness_cutoff:
                plt.axvline(self.op.bead_brightness_cutoff, color = 'blue', linestyle = '--' )
            else:
                plt.axvline(experiment.metadata[channel]['range'] * 0.7, color = 'blue', linestyle = '--')                

            if channel in self.op._peaks:
                for peak in self.op._peaks[channel]:
                    plt.axvline(peak, color = 'r')
                    
            if channel in self.op._peaks and channel in self.op._mefs:
                plt.subplot(len(channels), 2, 2 * idx + 2)
                plt.xscale('log')
                plt.yscale('log')
                plt.xlabel(channel)
                plt.ylabel(self.op.units[channel])
                plt.plot(self.op._peaks[channel], 
                         self.op._mefs[channel], 
                         marker = 'o')
                
                xmin, xmax = plt.xlim()
                x = np.logspace(np.log10(xmin), np.log10(xmax))
                plt.plot(x, 
                         self.op._calibration_functions[channel](x), 
                         color = 'r', linestyle = ':')
            
        plt.tight_layout(pad = 0.8)
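
A usage sketch, assuming a previously-configured ``bead_op`` (a hypothetical name for a BeadCalibrationOp) whose parameters have already been estimated; per the docstring, ``BeadCalibrationOp.default_view()`` sets the ``op`` attribute automatically:

    >>> bead_op.estimate(ex)               # hypothetical op; must be estimated before plotting
    >>> bead_op.default_view().plot(ex)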