Code example #1
 def __init__(self):
     super().__init__({
         "coherence": Range(50, 100),
         "presample": Natural0,
         "highreward": Set([0, 1]),
         "blocktype": Set([1, 2])
     })
Code example #2
class Solution(object):
    """Describes the result of an analytic or numerical DDM run.

    This is a glorified container for a joint pdf, between the
    response options (correct, error, and undecided) and the response
    time distribution associated with each.  It stores a copy of the
    response time distribution for both the correct case and the
    incorrect case, and the rest of the properties can be calculated
    from there.

    It also stores a full deep copy of the model used to simulate it.
    This is most important for storing, e.g. the dt and the name
    associated with the simulation, but it is also good to keep the
    whole object as a full record describing the simulation, so that
    the full parametrization of every run is recorded.  Note that this
    may increase memory requirements when many simulations are run.
    """
    @staticmethod
    def _test(v):
        # TODO should these be Positive0 instead of Number?
        assert v.corr in NDArray(d=1, t=Number), "Invalid corr histogram"
        assert v.err in NDArray(d=1, t=Number), "Invalid err histogram"
        if v.undec is not None:
            assert v.undec in NDArray(d=1, t=Number), "Invalid undec histogram"
            assert len(v.undec) == len(
                v.model.x_domain(conditions=v.conditions))
        #assert v.model is Generic(Model), "Invalid model" # TODO could cause inf recursion issue
        assert len(v.corr) == len(v.err) == len(
            v.model.t_domain()), "Histogram lengths must match"
        assert 0 <= fsum(v.corr.tolist() + v.err.tolist()) <= 1, \
            "Histogram integrates to " + str(fsum(v.corr.tolist() + v.err.tolist())) + \
            ", which is outside [0, 1]"
        assert v.conditions in Conditions()

    @staticmethod
    def _generate():
        from .model import Model  # Importing here avoids a recursion issue
        m = Model()
        T = m.t_domain()
        lT = len(T)
        X = m.x_domain(conditions={})
        lX = len(X)
        # All undecided
        yield Solution(np.zeros(lT), np.zeros(lT), m,
                       next(Conditions().generate()))
        # Uniform
        yield Solution(
            np.ones(lT) / (2 * lT),
            np.ones(lT) / (2 * lT), m, next(Conditions().generate()))
        # With uniform undecided probability
        yield Solution(np.ones(lT) / (3 * lT),
                       np.ones(lT) / (3 * lT),
                       m,
                       next(Conditions().generate()),
                       pdf_undec=np.ones(lX) / (3 * lX))
        # With uniform undecided probability with collapsing bounds
        from .models.bound import BoundCollapsingExponential
        m2 = Model(bound=BoundCollapsingExponential(B=1, tau=1))
        T2 = m2.t_domain()
        lT2 = len(T2)
        X2 = m2.x_domain(conditions={})
        lX2 = len(X2)
        yield Solution(np.ones(lT2) / (3 * lT2),
                       np.ones(lT2) / (3 * lT2),
                       m2,
                       next(Conditions().generate()),
                       pdf_undec=np.ones(lX2) / (3 * lX2))

    def __init__(self, pdf_corr, pdf_err, model, conditions, pdf_undec=None):
        """Create a Solution object from the results of a model
        simulation.

        Constructor takes four required arguments and an optional fifth.

            - `pdf_corr` - a size N numpy ndarray describing the correct portion of the joint pdf
            - `pdf_err` - a size N numpy ndarray describing the error portion of the joint pdf
            - `model` - the Model object used to generate `pdf_corr` and `pdf_err`
            - `conditions` - a dictionary of condition names/values used to generate the solution
            - `pdf_undec` - a size M numpy ndarray describing the final state of the simulation.  None if unavailable.
        """
        self.model = copy.deepcopy(
            model
        )  # TODO this could cause a memory leak if I forget it is there...
        self.corr = pdf_corr
        self.err = pdf_err
        self.undec = pdf_undec
        # Correct floating point errors to always get prob <= 1
        if fsum(self.corr.tolist() + self.err.tolist()) > 1:
            self.corr /= 1.00000000001
            self.err /= 1.00000000001
        self.conditions = conditions

    def __eq__(self, other):
        if not np.allclose(self.corr, other.corr) or \
           not np.allclose(self.err, other.err):
            return False
        for k in self.conditions:
            if k not in other.conditions:
                return False
            if np.issubdtype(self.conditions[k][0].dtype, np.floating) and \
               np.issubdtype(other.conditions[k][0].dtype, np.floating):
                compare_func = np.allclose
            else:
                compare_func = lambda x, y: np.all(np.equal(x, y))
            if not compare_func(self.conditions[k][0], other.conditions[k][0]) or \
               not compare_func(self.conditions[k][1], other.conditions[k][1]):
                return False
            if len(self.conditions[k]) == 3 and \
               len(other.conditions[k]) == 3 and \
               not compare_func(self.conditions[k][2], other.conditions[k][2]):
                return False
        if self.undec is not None:
            if not np.allclose(self.undec, other.undec):
                return False
        return True

    @accepts(Self)
    @returns(NDArray(d=1, t=Positive0))
    def pdf_corr(self):
        """The correct component of the joint PDF."""
        return self.corr / self.model.dt

    @accepts(Self)
    @returns(NDArray(d=1, t=Positive0))
    def pdf_err(self):
        """The error (incorrect) component of the joint PDF."""
        return self.err / self.model.dt

    @accepts(Self)
    @returns(NDArray(d=1, t=Positive0))
    @requires("self.undec is not None")
    def pdf_undec(self):
        """The final state of the simulation, same size as `x_domain()`.

        If the model contains overlays, this represents the final
        state of the simulation *before* the overlays are applied.
        This is because overlays do not specify what to do with the
        diffusion locations corresponding to undecided probabilities.
        Additionally, not all of the necessary information may be
        stored, as is the case with a non-decision time overlay.

        This means that in the case of models with a non-decision time
        t_nd, this gives the undecided probability at time T_dur +
        t_nd.

        If no overlays are in the model, then pdf_corr() + pdf_err() +
        pdf_undec() should always equal 1 (plus or minus floating
        point errors).
        """
        # Do this here to avoid import recursion
        from .models.overlay import OverlayNone
        # Common mistake so we want to warn the user of any possible
        # misunderstanding.
        if not isinstance(self.model.get_dependence("overlay"), OverlayNone):
            print(
                "WARNING: Undecided probability accessed for model with overlays.  "
                "Undecided probability applies *before* overlays.  Please see the "
                "pdf_undec docs for more information and to prevent misunderstanding."
            )
        if self.undec is not None:
            return self.undec / self.model.dx
        else:
            raise ValueError("Final state unavailable")

    @accepts(Self)
    @returns(NDArray(d=1, t=Positive0))
    def cdf_corr(self):
        """The correct component of the joint CDF."""
        return np.cumsum(self.corr)

    @accepts(Self)
    @returns(NDArray(d=1, t=Positive0))
    def cdf_err(self):
        """The error (incorrect) component of the joint CDF."""
        return np.cumsum(self.err)

    @accepts(Self)
    @returns(Range(0, 1))
    def prob_correct(self):
        """Probability of correct response within the time limit."""
        return fsum(self.corr)

    @accepts(Self)
    @returns(Range(0, 1))
    def prob_error(self):
        """Probability of incorrect (error) response within the time limit."""
        return fsum(self.err)

    @accepts(Self)
    @returns(Range(0, 1))
    def prob_undecided(self):
        """The probability of not responding during the time limit."""
        udprob = 1 - fsum(self.corr.tolist() + self.err.tolist())
        if udprob < 0:
            print("Warning, setting undecided probability from %f to 0" %
                  udprob)
            udprob = 0
        return udprob

    @accepts(Self)
    @returns(Range(0, 1))
    def prob_correct_forced(self):
        """Probability of correct response if a response is forced.

        Forced responses are selected randomly."""
        return self.prob_correct() + self.prob_undecided() / 2.

    @accepts(Self)
    @returns(Range(0, 1))
    def prob_error_forced(self):
        """Probability of incorrect response if a response is forced.

        Forced responses are selected randomly."""
        return self.prob_error() + self.prob_undecided() / 2.

    @accepts(Self)
    @returns(Range(0, 1))
    @requires("self.undec is not None")
    def prob_correct_sign(self):
        """Probability of correct response if a response is forced.

        Forced responses are selected by the position of the decision
        variable at the end of the time limit T_dur.

        This is only available for the implicit method.
        """
        return self.prob_correct() + np.sum(
            self.undec[len(self.undec) // 2 + 1:])

    @accepts(Self)
    @returns(Range(0, 1))
    @requires("self.undec is not None")
    def prob_error_sign(self):
        """Probability of incorrect response if a response is forced.

        Forced responses are selected by the position of the decision
        variable at the end of the time limit T_dur.

        This is only available for the implicit method.
        """
        return self.prob_error() + np.sum(self.undec[:len(self.undec) // 2])

    @accepts(Self)
    @requires('self.prob_correct() > 0')
    @returns(Positive0)
    def mean_decision_time(self):
        """The mean decision time in the correct trials (excluding undecided trials)."""
        return fsum((self.corr) * self.model.t_domain()) / self.prob_correct()

    @accepts(Self, Natural1, seed=Natural0)
    @returns(Sample)
    @ensures("len(return) == k")
    def resample(self, k=1, seed=0):
        """Generate a list of reaction times sampled from the PDF.

        `k` is the number of TRIALS, not the number of samples.  Each
        sampled trial is either correct, an error, or undecided, so the
        number of correct plus error reaction times in the returned
        Sample may be less than `k`; if there are no undecided trials
        it will equal `k`.

        This relies on the assumption that reaction time cannot be
        less than 0.

        Returns a Sample object representing the distribution.
        """
        # Exclude the last point in the t domain because we will add
        # uniform noise to the sample and this would put us over the
        # model's T_dur.
        shorter_t_domain = self.model.t_domain()[:-1]
        shorter_pdf_corr = self.pdf_corr()[:-1]
        shorter_pdf_corr[-1] += self.pdf_corr()[-1]
        shorter_pdf_err = self.pdf_err()[:-1]
        shorter_pdf_err[-1] += self.pdf_err()[-1]
        # Concatenate the correct and error distributions as well as
        # their probabilities, and add an undecided component on the
        # end.  Shift the error t domain by the maximum plus one.
        shift = np.max(shorter_t_domain) + 1
        combined_domain = list(shorter_t_domain) + list(shorter_t_domain +
                                                        shift) + [-1]
        combined_probs = list(shorter_pdf_corr * self.model.dt) + list(
            shorter_pdf_err * self.model.dt) + [self.prob_undecided()]
        if fsum(combined_probs) != 1:
            print("Warning, distribution sums to %f rather than 1" %
                  fsum(combined_probs))
        # Each point x on the pdf represents the space from x to x+dt.
        # So sample and then add uniform noise to each element.
        samp = np.random.choice(combined_domain,
                                p=combined_probs,
                                replace=True,
                                size=k)
        samp += np.random.uniform(0, self.model.dt, k)

        aa = np.asarray
        undecided = np.sum(samp == -1)
        samp = samp[samp != -1]  # Remove undecided trials
        # Find correct and error trials
        corr_sample = samp[samp < shift]
        err_sample = samp[samp >= shift] - shift
        # Build Sample object
        conditions = {
            k: (aa([v] * len(corr_sample)), aa([v] * len(err_sample)),
                aa([v] * int(undecided)))
            for k, v in self.conditions.items()
        }
        return Sample(corr_sample, err_sample, undecided, **conditions)
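In typical use, a Solution like the one above is not built by hand but returned by solving a model. The following is a minimal sketch, assuming the usual PyDDM entry point Model.solve(), which is not shown in this excerpt:

from ddm import Model

model = Model()                       # default parameters, as in _generate() above
sol = model.solve()                   # returns a Solution like the class above
p_corr = sol.prob_correct()           # probability of a correct response
pdf = sol.pdf_corr()                  # density over model.t_domain()
sample = sol.resample(1000, seed=0)   # Sample of 1000 simulated trials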
Code example #3
File: sample.py  Project: stefan-f-bucher/PyDDM
class Sample(object):
    """Describes a sample from some (empirical or simulated) distribution.

    Similarly to Solution, this is a glorified container for three
    items: a list of correct reaction times, a list of error reaction
    times, and the number of undecided trials.  Each can have
    different properties associated with it, known as "conditions"
    elsewhere in this codebase.  This is to specify the experimental
    parameters of the trial, to allow fitting of stimuli by (for
    example) color or intensity.

    To specify conditions, pass a keyword argument to the constructor.
    The name should be the name of the property, and the value should
    be a tuple of length two or three.  The first element of the tuple
    should be a list of length equal to the number of correct trials,
    and the second should be a list of length equal to the number of
    error trials.  If there are any undecided trials, the third
    element should be a list of length equal to `undecided`.

    Optionally, additional data can be associated with each
    independent data point.  These should be passed as keyword
    arguments, where the keyword name is the property and the value is
    a tuple.  The tuple should have either two or three elements: the
    first two should be lists of properties for the correct and error
    reaction times, where the properties correspond to reaction times
    in the correct or error lists.  Optionally, a third list of length
    equal to the number of undecided trials gives a list of conditions
    for these trials.  If multiple properties are passed as keyword
    arguments, the ordering of the undecided properties (in addition
    to those of the correct and error distributions) will correspond
    to one another.
    """
    @classmethod
    def _test(cls, v):
        # Most testing is done in the constructor and the data is read
        # only, so this isn't strictly necessary
        assert type(v) is cls
    @staticmethod
    def _generate():
        aa = lambda x : np.asarray(x)
        yield Sample(aa([.1, .2, .3]), aa([.2, .3, .4]), undecided=0)
        yield Sample(aa([.1, .2, .3]), aa([]), undecided=0)
        yield Sample(aa([]), aa([.2, .3, .4]), undecided=0)
        yield Sample(aa([.1, .2, .3]), aa([.2, .3, .4]), undecided=5)
        
    def __init__(self, sample_corr, sample_err, undecided=0, **kwargs):
        assert sample_corr in NDArray(d=1, t=Number), "sample_corr not a numpy array, it is " + str(type(sample_corr))
        assert sample_err in NDArray(d=1, t=Number), "sample_err not a numpy array, it is " + str(type(sample_err))
        assert undecided in Natural0(), "undecided not a natural number"
        self.corr = sample_corr
        self.err = sample_err
        self.undecided = undecided
        # Values should not change
        self.corr.flags.writeable = False
        self.err.flags.writeable = False
        # Make sure the kwarg parameters/conditions are in the correct
        # format
        for _,v in kwargs.items():
            # Make sure shape and type are correct
            assert isinstance(v, tuple)
            assert len(v) in [2, 3]
            assert v[0] in NDArray(d=1)
            assert v[1] in NDArray(d=1)
            assert len(v[0]) == len(self.corr)
            assert len(v[1]) == len(self.err)
            # Make read-only
            v[0].flags.writeable = False
            v[1].flags.writeable = False
            if len(v) == 3:
                assert len(v[2]) == undecided
            else:
                assert undecided == 0
        self.conditions = kwargs
    def __len__(self):
        """The number of samples"""
        return len(self.corr) + len(self.err) + self.undecided
    def __iter__(self):
        """Iterate through each reaction time, with no regard to whether it was a correct or error trial."""
        return np.concatenate([self.corr, self.err]).__iter__()
    def __eq__(self, other):
        if not np.allclose(self.corr, other.corr) or \
           not np.allclose(self.err, other.err) or \
           self.undecided != other.undecided:
            return False
        for k in self.conditions:
            if k not in other.conditions:
                return False
            if np.issubdtype(self.conditions[k][0].dtype, np.floating) and \
               np.issubdtype(other.conditions[k][0].dtype, np.floating):
                compare_func = np.allclose
            else:
                compare_func = lambda x,y: np.all(x == y)
            if not compare_func(self.conditions[k][0], other.conditions[k][0]) or \
               not compare_func(self.conditions[k][1], other.conditions[k][1]):
                return False
            if len(self.conditions[k]) == 3 and \
               len(other.conditions[k]) == 3 and \
               not compare_func(self.conditions[k][2], other.conditions[k][2]):
                return False
        return True
    def __add__(self, other):
        assert sorted(self.conditions.keys()) == sorted(other.conditions.keys()), "Cannot add with unlike conditions"
        corr = np.concatenate([self.corr, other.corr])
        err = np.concatenate([self.err, other.err])
        undecided = self.undecided + other.undecided
        conditions = {}
        for k in self.conditions.keys():
            sc = self.conditions
            oc = other.conditions
            bothc = np.concatenate([sc[k][0], oc[k][0]])
            bothe = np.concatenate([sc[k][1], oc[k][1]])
            bothn = np.concatenate([sc[k][2] if len(sc[k]) == 3 else [],
                                    oc[k][2] if len(oc[k]) == 3 else []])
            conditions[k] = (bothc, bothe, bothn)
        return Sample(corr, err, undecided, **conditions)
    @staticmethod
    @accepts(NDArray(d=2, t=Number), List(String))
    @returns(Self)
    @requires('data.shape[1] >= 2')
    @requires('set(list(data[:,1])) - {0, 1} == set()')
    @requires('all(data[:,0].astype("float") == data[:,0])')
    @requires('data.shape[1] - 2 == len(column_names)')
    @ensures('len(column_names) == len(return.condition_names())')
    def from_numpy_array(data, column_names):
        """Generate a Sample object from a numpy array.
        
        `data` should be an n x m array (n rows, m columns) where
        m>=2. The first column should be the response times, and the
        second column should be whether the trial was correct or an
        error (1 == correct, 0 == error).  Any remaining columns
        should be conditions.  `column_names` should be a list of
        length m of strings indicating the names of the conditions.
        The order of the names should correspond to the order of the
        columns.  This function does not yet work with undecided
        trials.
        """
        c = data[:,1].astype(bool)
        nc = (1-data[:,1]).astype(bool)
        def pt(x): # Pythonic types
            arr = np.asarray(x)
            if np.all(arr == np.round(arr)):
                arr = arr.astype(int)
            return arr

        conditions = {k: (pt(data[c,i+2]), pt(data[nc,i+2]), []) for i,k in enumerate(column_names)}
        return Sample(pt(data[c,0]), pt(data[nc,0]), 0, **conditions)
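    # Illustrative sketch (hypothetical data, not part of the original file):
    # a 4 x 3 array with columns [rt, correct, coherence] could be converted via
    #     data = np.array([[0.31, 1, 53],
    #                      [0.42, 0, 60],
    #                      [0.28, 1, 60],
    #                      [0.55, 1, 70]])
    #     sample = Sample.from_numpy_array(data, ["coherence"])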
    @staticmethod
    @accepts(Unchecked, String, String) # TODO change unchecked to pandas
    @returns(Self)
    @requires('df.shape[1] >= 2')
    @requires('rt_column_name in df')
    @requires('correct_column_name in df')
    @requires('not np.any(np.isnan(df))')
    @requires('len(np.setdiff1d(df[correct_column_name], [0, 1])) == 0')
    @requires('all(df[rt_column_name].astype("float") == df[rt_column_name])')
    @ensures('len(df) == len(return)')
    def from_pandas_dataframe(df, rt_column_name, correct_column_name):
        """Generate a Sample object from a pandas dataframe.
        
        `df` should be a pandas array.  `rt_column_name` and
        `correct_column_name` should be strings, and `df` should
        contain columns by these names. The column with the name
        `rt_column_name` should be the response times, and the column
        with the name `correct_column_name` should be whether the
        trial was correct or an error (1 == correct, 0 == error).  Any
        remaining columns should be conditions.  This function does
        not yet work with undecided trials.
        """
        if np.mean(df[rt_column_name]) > 50:
            print("Warning: RTs should be specified in seconds, not milliseconds")
        c = df[correct_column_name].astype(bool)
        nc = (1-df[correct_column_name]).astype(bool)
        def pt(x): # Pythonic types
            arr = np.asarray(x)
            if np.all(arr == np.round(arr)):
                arr = arr.astype(int)
            return arr

        column_names = [e for e in df.columns if not e in [rt_column_name, correct_column_name]]
        conditions = {k: (pt(df[c][k]), pt(df[nc][k]), []) for k in column_names}
        return Sample(pt(df[c][rt_column_name]), pt(df[nc][rt_column_name]), 0, **conditions)
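    # Illustrative sketch (assumed column names, not part of the original file):
    # given a DataFrame with columns "rt", "correct", and "coherence",
    #     sample = Sample.from_pandas_dataframe(df, rt_column_name="rt",
    #                                           correct_column_name="correct")
    # picks up the leftover "coherence" column as a condition automatically.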
    def items(self, correct):
        """Iterate through the reaction times.

        This takes only one argument: a boolean `correct`, true if we
        want to iterate through the correct trials, and false if we
        want to iterate through the error trials.  

        For each iteration, a two-tuple is returned.  The first
        element is the reaction time, the second is a dictionary
        containing the conditions associated with that reaction time.
        """
        return _Sample_Iter_Wraper(self, correct=correct)
    @accepts(Self)
    @returns(Self)
    def subset(self, **kwargs):
        """Subset the data by filtering based on specified properties.

        Each keyword argument should be the name of a property.  These
        keyword arguments may have one of three values:

        - A list: For each element in the returned subset, the
          specified property is in this list of values.
        - A function: For each element in the returned subset, the
          specified property causes the function to evaluate to True.
        - Anything else: Each element in the returned subset must have
          this value for the specified property.

        Return a sample object representing the filtered sample.
        """
        mask_corr = np.ones(len(self.corr)).astype(bool)
        mask_err = np.ones(len(self.err)).astype(bool)
        mask_undec = np.ones(self.undecided).astype(bool)
        for k,v in kwargs.items():
            if hasattr(v, '__call__'):
                mask_corr = np.logical_and(mask_corr, [v(i) for i in self.conditions[k][0]])
                mask_err = np.logical_and(mask_err, [v(i) for i in  self.conditions[k][1]])
                mask_undec = [] if self.undecided == 0 else np.logical_and(mask_undec, [v(i) for i in self.conditions[k][2]])
            elif hasattr(v, '__contains__'):
                mask_corr = np.logical_and(mask_corr, [i in v for i in self.conditions[k][0]])
                mask_err = np.logical_and(mask_err, [i in v for i in self.conditions[k][1]])
                mask_undec = [] if self.undecided == 0 else np.logical_and(mask_undec, [i in v for i in self.conditions[k][2]])
            else:
                mask_corr = np.logical_and(mask_corr, [i == v for i in self.conditions[k][0]])
                mask_err = np.logical_and(mask_err, [i == v for i in self.conditions[k][1]])
                mask_undec = [] if self.undecided == 0 else np.logical_and(mask_undec, [i == v for i in self.conditions[k][2]])
        filtered_conditions = {k : (np.asarray(list(itertools.compress(v[0], mask_corr))),
                                    np.asarray(list(itertools.compress(v[1], mask_err))),
                                    (np.asarray(list(itertools.compress(v[2], mask_undec))) if len(v) == 3 else np.asarray([])))
                               for k,v in self.conditions.items()}
        return Sample(np.asarray(list(itertools.compress(self.corr, list(mask_corr)))),
                      np.asarray(list(itertools.compress(self.err, list(mask_err)))),
                      sum(mask_undec),
                      **filtered_conditions)
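    # Illustrative sketch (hypothetical condition name): each keyword may be a
    # list of allowed values, a predicate function, or a single value, e.g.
    #     sample.subset(coherence=[53, 60])          # value must be in the list
    #     sample.subset(coherence=lambda c: c > 55)  # predicate must hold
    #     sample.subset(coherence=60)                # value must equal 60 exactly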
    @accepts(Self)
    @returns(List(String))
    def condition_names(self):
        """The names of conditions which hold some non-zero value in this sample."""
        return list(self.conditions.keys())
    @accepts(Self, String)
    @requires('cond in self.condition_names()')
    @returns(List(Unchecked))
    def condition_values(self, cond):
        """The values of a condition that have at least one element in the sample.

        `cond` is the name of the condition from which to get the
        observed values.  Returns a list of these values.
        """
        cs = self.conditions
        cvs = set(cs[cond][0]).union(set(cs[cond][1]))
        if len(cs[cond]) == 3:
            cvs = cvs.union(set(cs[cond][2]))
        return sorted(list(cvs))
    @accepts(Self, Or(Nothing, List(String)))
    @returns(List(Conditions))
    def condition_combinations(self, required_conditions=None):
        """Get all values for set conditions and return every combination of them.

        Since PDFs of solved models in general depend on all of the
        conditions, this returns a list of dictionaries.  The keys of
        each dictionary are the names of conditions, and the value is
        a particular value held by at least one element in the sample.
        Each list contains all possible combinations of condition values.

        If `required_conditions` is iterable, only the conditions with
        names found within `required_conditions` will be included.
        """
        cs = self.conditions
        conditions = []
        names = self.condition_names()
        if required_conditions is not None:
            names = [n for n in names if n in required_conditions]
        for c in names:
            conditions.append(list(set(cs[c][0]).union(set(cs[c][1]))))
        combs = []
        for p in itertools.product(*conditions):
            if len(self.subset(**dict(zip(names, p)))) != 0:
                combs.append(dict(zip(names, p)))
        if len(combs) == 0: # Generally not needed since itertools.product does this
            return [{}]
        return combs

    @staticmethod
    @accepts(dt=Positive, T_dur=Positive)
    @returns(NDArray(d=1, t=Positive0))
    #@requires('T_dur/dt < 1e5') # Too large of a number
    def t_domain(dt=.01, T_dur=2):
        """The times that corresponds with pdf/cdf_corr/err parameters (their support)."""
        return np.linspace(0, T_dur, int(T_dur/dt)+1)

    @accepts(Self, dt=Positive, T_dur=Positive)
    @returns(NDArray(d=1, t=Positive0))
    #@requires('T_dur/dt < 1e5') # Too large of a number
    @ensures('len(return) == len(self.t_domain(dt=dt, T_dur=T_dur))')
    def pdf_corr(self, dt=.01, T_dur=2):
        """The correct component of the joint PDF."""
        return np.histogram(self.corr, bins=int(T_dur/dt)+1, range=(0-dt/2, T_dur+dt/2))[0]/len(self)/dt # dt/2 terms are for continuity correction

    @accepts(Self, dt=Positive, T_dur=Positive)
    @returns(NDArray(d=1, t=Positive0))
    #@requires('T_dur/dt < 1e5') # Too large of a number
    @ensures('len(return) == len(self.t_domain(dt=dt, T_dur=T_dur))')
    def pdf_err(self, dt=.01, T_dur=2):
        """The error (incorrect) component of the joint PDF."""
        return np.histogram(self.err, bins=int(T_dur/dt)+1, range=(0-dt/2, T_dur+dt/2))[0]/len(self)/dt # dt/2 terms are for continuity correction

    @accepts(Self, dt=Positive, T_dur=Positive)
    @returns(NDArray(d=1, t=Positive0))
    #@requires('T_dur/dt < 1e5') # Too large of a number
    @ensures('len(return) == len(self.t_domain(dt=dt, T_dur=T_dur))')
    def cdf_corr(self, dt=.01, T_dur=2):
        """The correct component of the joint CDF."""
        return np.cumsum(self.pdf_corr(dt=dt, T_dur=T_dur))*dt

    @accepts(Self, dt=Positive, T_dur=Positive)
    @returns(NDArray(d=1, t=Positive0))
    @ensures('len(return) == len(self.t_domain(dt=dt, T_dur=T_dur))')
    def cdf_err(self, dt=.01, T_dur=2):
        """The error (incorrect) component of the joint CDF."""
        return np.cumsum(self.pdf_err(dt=dt, T_dur=T_dur))*dt

    @accepts(Self)
    @returns(Range(0, 1))
    @requires("len(self) > 0")
    def prob_correct(self):
        """The probability of selecting the right response."""
        return len(self.corr)/len(self)

    @accepts(Self)
    @returns(Range(0, 1))
    @requires("len(self) > 0")
    def prob_error(self):
        """The probability of selecting the incorrect (error) response."""
        return len(self.err)/len(self)

    @accepts(Self)
    @returns(Range(0, 1))
    @requires("len(self) > 0")
    def prob_undecided(self):
        """The probability of selecting neither response (undecided)."""
        return self.undecided/len(self)

    @accepts(Self)
    @returns(Range(0, 1))
    @requires("len(self) > 0")
    def prob_correct_forced(self):
        """The probability of selecting the correct response if a response is forced."""
        return self.prob_correct() + self.prob_undecided()/2.

    @accepts(Self)
    @returns(Range(0, 1))
    @requires("len(self) > 0")
    def prob_error_forced(self):
        """The probability of selecting the incorrect response if a response is forced."""
        return self.prob_error() + self.prob_undecided()/2.
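For reference, a minimal sketch (hypothetical condition name and made-up reaction times, not from the original file) of constructing a Sample directly, following the tuple format described in the class docstring:

import numpy as np

rt_corr = np.asarray([.31, .42, .28])                 # correct reaction times
rt_err = np.asarray([.55])                            # error reaction times
sample = Sample(rt_corr, rt_err, undecided=1,
                coherence=(np.asarray([53, 60, 60]),  # one value per correct trial
                           np.asarray([70]),          # one value per error trial
                           np.asarray([53])))         # one value per undecided trial
len(sample)                           # 5 trials in total
sample.condition_values("coherence")  # [53, 60, 70]
sample.subset(coherence=60)           # Sample containing only the coherence-60 trials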
Code example #4
 def _test(v):
     assert v.pmixturecoef in Range(0, 1), "Invalid pmixture coef"
     assert v.ratehr in Positive(), "Invalid rate"
     assert v.ratelr in Positive(), "Invalid rate"
Code example #5
 def _test(v):
     assert v.mappingcoef in Range(0, 1), "Invalid mapping coefficient"
Code example #6
# line for the interactive demo.

from paranoid.decorators import accepts, returns, requires, ensures, paranoidclass
from paranoid.types import RangeOpenClosed, RangeOpen, Range, Positive0, NDArray, ParametersDict, Natural0, Set, Self, Number, Positive
import ddm
import numpy as np
import scipy

# Note that BEGIN and END statements appear throughout this file, and
# are markers for keeping the file in sync with the documentation in
# the PyDDM Cookbook.


# BEGIN utility_functions
# Paranoid annotations for correctness
@accepts(Range(50, 100), RangeOpen(0, 10), RangeOpenClosed(50, 100))
@requires("coh <= max_coh")
@returns(Range(0, 1))
@ensures("return == 0 <--> coh == 50")
@ensures("return == 1 <--> coh == max_coh")
# Monotonic increasing in coh, decreasing in exponent
@ensures(
    "coh >= coh` and exponent <= exponent` and max_coh == max_coh` --> return >= return`"
)
def coh_transform(coh, exponent, max_coh):
    """Transform coherence to range 0-1.

    `coh` should be in range 50-`max_coh`, and exponent greater than
    0.  Returns a number 0-1 via nonlinearity `exponent`.
    """
    coh_coef = (coh - 50) / (max_coh - 50)