Example #1
def solve_all_conditions(model, sample, conditions={}, method=None):
    """Solve the model for all conditions relevant to the sample.

    This takes the following parameters:

    - `model` - A Model() object
    - `sample` - A Sample() object which has conditions for each of
      the required conditions in `model`
    - `conditions` - Restrict to specific conditions
    - `method` - A string describing the solver method.  Can be
      "analytical", "numerical", "cn", "implicit", or "explicit".

    For each value of each relevant condition in sample (i.e. those in
    the model's required conditions), this will solve the model for
    that set of parameters.  It returns a dictionary indexed by a
    frozenset of the condition names and values, with the Solution
    object as the value, e.g.:
    
        {frozenset({('reward', 3)}): <Solution object>,
         frozenset({('reward', 1)}): <Solution object>}

    This function will automatically parallelize if set_N_cpus() has
    been called.
    """

    conds = sample.condition_combinations(
        required_conditions=model.required_conditions)
    if method is None:
        meth = model.solve
    elif method == "analytical":
        meth = model.solve_analytical
    elif method == "numerical":
        meth = model.solve_numerical
    elif method == "cn":
        meth = model.solve_numerical_cn
    elif method == "implicit":
        meth = model.solve_numerical_implicit
    elif method == "explicit":
        meth = model.solve_numerical_explicit
    else:
        raise ValueError("Invalid method " + method)

    cache = {}
    if _parallel_pool is None:  # No parallelization
        for c in conds:
            cache[frozenset(c.items())] = meth(conditions=c)
        return cache
    else:  # Parallelize across pool
        if paranoid_settings.get('enabled') is False:
            # The *2 makes sure that this runs on all subprocesses,
            # since you can't broadcast commands to all processes
            _parallel_pool.map(lambda x: paranoid_settings.set(enabled=False),
                               [None] * _parallel_pool.n_cpus * 2)
        sols = _parallel_pool.map(meth, conds, chunksize=1)
        for c, s in zip(conds, sols):
            cache[frozenset(c.items())] = s
        return cache
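
A minimal usage sketch of the above (hedged: `my_model` and `my_sample` stand in for a real PyDDM Model and Sample; `set_N_cpus` is the PyDDM helper the docstring refers to):

    from ddm import set_N_cpus

    set_N_cpus(4)  # optional: solve conditions across 4 worker processes
    sols = solve_all_conditions(my_model, my_sample)
    # Solutions are keyed by frozensets of (condition, value) pairs:
    sol = sols[frozenset({("reward", 3)})]
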
Example #2
    def test_scoping_of_namespace(self):
        """Names in 'namespace' setting overridden by same-name argument"""
        Settings.get("namespace").update({"theval": 3})

        @pd.accepts(theval=pt.Integer)
        @pd.ensures("theval != 3")
        def func(theval):
            pass

        func(2)
        fails(lambda: func(3))
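
As a hedged sketch of the scoping rule this test pins down: a name from the "namespace" setting is visible inside condition strings only when no argument of the same name shadows it (`limit` below is a hypothetical namespace entry):

    Settings.get("namespace").update({"limit": 10})

    @pd.accepts(pt.Integer)
    @pd.ensures("return <= limit")  # "limit" resolves via the namespace setting
    def clamp(x):
        return min(x, 10)
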
Example #3
    def test_syntactic_sugar_set(self):
        """The nice interface to change settings is working"""
        prevval = Settings.get("enabled")
        Settings.set(enabled=False)
        assert Settings.get("enabled") == False
        Settings.set(enabled=True)
        assert Settings.get("enabled") == True
        Settings._set("enabled", prevval)
Example #4
    def test_function_local_override(self):
        """Functions can locally override global settings"""
        f1 = lambda x: x
        prevval = Settings.get("max_cache")
        Settings._set("max_cache", 1234)
        assert Settings.get("max_cache", function=f1) == 1234
        Settings._set("max_cache", 2345, function=f1)
        assert Settings.get("max_cache", function=f1) == 2345
        Settings._set("max_cache", prevval)
Example #5
    def test_set_setting_consistent(self):
        """Settings consistent when imported under different names"""
        import paranoid.settings as ps1
        import paranoid.settings as ps2
        from paranoid.settings import Settings as ps3
        prevval = ps1.Settings.get("max_cache")
        ps1.Settings._set("max_cache", 1234)
        assert ps2.Settings.get("max_cache") == 1234
        assert ps3.get("max_cache") == 1234
        ps1.Settings._set("max_cache", prevval)
Example #6
    def test_set_setting(self):
        """Assign a value to a setting"""
        # "enabled" is boolean
        prevval = Settings.get("enabled")
        Settings._set("enabled", not prevval)
        assert Settings.get("enabled") == (not prevval)
        Settings._set("enabled", prevval)
Example #7
    def test_disable_paranoid(self):
        """Make sure we can disable paranoid scientist"""
        @pd.accepts(pt.Boolean)
        @pd.returns(pt.Boolean)
        def not_boolean(x):
            return int(x + 3)

        prevval = Settings.get("enabled")
        Settings.set(enabled=False)
        not_boolean(5)
        Settings.set(enabled=True)
        fails(lambda: not_boolean(5))
        Settings._set("enabled", prevval)
Example #8
def fit_adjust_model(sample,
                     model,
                     fitparams=None,
                     method="differential_evolution",
                     lossfunction=LossLikelihood,
                     verify=False):
    """Modify parameters of a model which has already been fit.
    
    The data `sample` should be a Sample object of the reaction times
    to fit in seconds (NOT milliseconds).  At least one of the
    parameters for one of the components in the model should be a
    "Fitted()" instance, as these will be the parameters to fit.
    
    `method` specifies how the model should be fit.
    "differential_evolution" is the default, which seems to be able to
    accurately locate the global minimum of the loss without using a
    derivative.  "simple" uses a local derivative-based method
    (scipy.optimize.minimize) starting from the parameters' default
    values.  "simplex" uses the Nelder-Mead simplex method, and
    "hillclimb" uses a simple evolution strategy.  "basin" uses
    "scipy.optimize.basinhopping" to find an optimal solution, which
    is much slower but also gives better results than "simple".  It
    does not appear to give better results than
    "differential_evolution".  Alternatively, a custom minimization
    routine may be used by setting `method` to be a function which
    accepts the "x_0" parameter (for starting position) and
    "constraints" (for min and max values).

    `fitparams` is a dictionary of kwargs to be passed directly to the
    minimization routine for fine-grained low-level control over the
    optimization.  Normally this should not be needed.  

    `lossfunction` is a subclass of LossFunction representing the
    method to use when calculating the goodness-of-fit.  Pass the
    subclass itself, NOT an instance of the subclass.

    If `verify` is False (the default), checking for programming
    errors is disabled during the fit. This can decrease runtime and
    may prevent crashes.  If verification is already disabled, this
    does not re-enable it.

    Returns the same model object that was passed to it as an
    argument.  However, the parameters will be modified.  The model is
    modified in place, so a reference is returned to it for
    convenience only.

    This function will automatically parallelize if set_N_cpus() has
    been called.

    """
    # Disable paranoid if `verify` is False.
    paranoid_state = paranoid_settings.get('enabled')
    if paranoid_state and not verify:
        paranoid_settings.set(enabled=False)
    # Loop through the different components of the model and get the
    # parameters that are fittable.  Save the "Fittable" objects in
    # "params".  Create a list of functions to set the value of these
    # parameters, named "setters".
    m = model
    components_list = [
        m.get_dependence("drift"),
        m.get_dependence("noise"),
        m.get_dependence("bound"),
        m.get_dependence("IC"),
        m.get_dependence("overlay")
    ]
    required_conditions = list(
        set([x for l in components_list for x in l.required_conditions]))
    assert 0 < len([1 for component in components_list
                      for param_name in component.required_parameters
                          if isinstance(getattr(component, param_name), Fittable)]), \
           "Models must contain at least one Fittable parameter in order to be fit"
    params = []  # A list of all of the Fittables that were passed.
    setters = [
    ]  # A list of functions which set the value of the corresponding parameter in `params`
    for component in components_list:
        for param_name in component.required_parameters:
            pv = getattr(component,
                         param_name)  # Parameter value in the object
            if isinstance(pv, Fittable):
                # Create a function which sets each parameter in the
                # list to some value `a` for model `x`.  Note the
                # default arguments to the function are necessary here
                # to preserve scope.  Without them, these variables
                # would be interpreted in the local scope, so they
                # would be equal to the last value encountered in the
                # loop.
                def setter(x,
                           a,
                           pv=pv,
                           component=component,
                           param_name=param_name):
                    if not isinstance(a, Fittable):
                        a = pv.make_fitted(a)
                    setattr(x.get_dependence(component.depname), param_name, a)
                    # Return the fitted instance so we can chain it.
                    # This way, if the same Fittable object is passed,
                    # the same Fitted object will be in both places in
                    # the solution.
                    return a

                # If we have the same Fittable object in two different
                # components inside the model, we only want the Fittable
                # object in the list "params" once, but we want the setter
                # to update both.
                if id(pv) in map(id, params):
                    pind = list(map(id, params)).index(id(pv))
                    oldsetter = setters[pind]
                    # This is a hack way of executing two functions in
                    # a single function call while passing forward the
                    # same argument object (not just the same argument
                    # value)
                    def newsetter(x, a, setter=setter, oldsetter=oldsetter):
                        return oldsetter(x, setter(x, a))
                    setters[pind] = newsetter
                else:  # This setter is unique (so far)
                    params.append(pv)
                    setters.append(setter)

    # And now get rid of the Fittables, replacing them with the
    # default values.  Simultaneously, create a list to pass to the
    # solver.
    x_0 = []
    constraints = [
    ]  # List of (min, max) tuples.  min/max=None if no constraint.
    for p, s in zip(params, setters):
        default = p.default()
        s(m, default)
        minval = p.minval if p.minval > -np.inf else None
        maxval = p.maxval if p.maxval < np.inf else None
        constraints.append((minval, maxval))
        x_0.append(default)
    # Set up a loss function
    lf = lossfunction(sample,
                      required_conditions=required_conditions,
                      T_dur=m.T_dur,
                      dt=m.dt,
                      nparams=len(params),
                      samplesize=len(sample))

    # A function for the solver to minimize.  Since the model is in
    # this scope, we can make use of it by using, for example, the
    # model `m` defined previously.
    def _fit_model(xs):
        for x, p, s in zip(xs, params, setters):
            # Sometimes the numpy optimizers will ignore bounds up to
            # floating point errors, i.e. if your upper bound is 1,
            # they will give 1.000000000001.  This fixes that problem
            # to make sure the model is within its domain.
            if x > p.maxval:
                print(
                    "Warning: optimizer went out of bounds.  Setting %f to %f"
                    % (x, p.maxval))
                x = p.maxval
            if x < p.minval:
                print(
                    "Warning: optimizer went out of bounds.  Setting %f to %f"
                    % (x, p.minval))
                x = p.minval
            s(m, x)
        lossf = lf.loss(m)
        print(repr(m), "loss=" + str(lossf))
        return lossf

    # Default to an empty dictionary of optimizer options if none were given
    if fitparams is None:
        fitparams = {}
    # Run the solver
    print(x_0)
    if method == "simple":
        x_fit = minimize(_fit_model, x_0, bounds=constraints)
        assert x_fit.success, "Fit failed: %s" % x_fit.message
    elif method == "simplex":
        x_fit = minimize(_fit_model, x_0, method='Nelder-Mead')
    elif method == "basin":
        x_fit = basinhopping(_fit_model,
                             x_0,
                             minimizer_kwargs={
                                 "bounds": constraints,
                                 "method": "TNC"
                             },
                             disp=True,
                             **fitparams)
    elif method == "differential_evolution":
        x_fit = differential_evolution(_fit_model,
                                       constraints,
                                       disp=True,
                                       **fitparams)
    elif method == "hillclimb":
        x_fit = evolution_strategy(_fit_model, x_0, **fitparams)
    elif callable(method):
        x_fit = method(_fit_model, x_0=x_0, constraints=constraints)
    else:
        raise NotImplementedError("Invalid method")
    res = FitResult(
        method=method,
        loss=lf.name,
        value=x_fit.fun,
        nparams=len(params),
        samplesize=len(sample),
        # scipy's OptimizeResult stores attributes in the dict itself, not in
        # __dict__, so use hasattr() to check for a message.
        mess=(x_fit.message if hasattr(x_fit, "message") else ""))
    m.fitresult = res
    print("Params", x_fit.x, "gave", x_fit.fun)
    for x, s in zip(x_fit.x, setters):
        s(m, x)
    if paranoid_state and not verify:
        paranoid_settings.set(enabled=True)
    return m
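
A minimal fitting sketch (hedged: assumes the usual PyDDM import layout, and `my_sample` is a hypothetical Sample of reaction times in seconds):

    from ddm import Model, Fittable
    from ddm.models import DriftConstant, LossLikelihood
    from ddm.functions import fit_adjust_model

    model = Model(drift=DriftConstant(drift=Fittable(minval=0, maxval=4)))
    fit_adjust_model(my_sample, model,
                     method="differential_evolution",
                     lossfunction=LossLikelihood)
    print(model.fitresult)  # the model's parameters are updated in place
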
Example #9
def model_gui(model,
              sample=None,
              data_dt=.01,
              plot=plot_fit_diagnostics,
              conditions=None,
              verify=False):
    """Mess around with model parameters visually.

    This allows you to see how the model `model` would be affected by
    various changes in parameter values.  It also allows you to easily
    plot `sample` conditioned on different conditions.  A sample (or,
    alternatively, the `conditions` argument) is required so that
    model_gui knows which conditions to include and the ratio of these
    conditions.

    The function `plot` allows you to change what is plotted.
    By default, it is plot_fit_diagnostics.  If you would like to
    define your own custom function, it must take four keyword
    arguments: "model", the model to plot, "sample", an optional
    (defaulting to None) Sample object to potentially compare to the
    model, "fig", an optional (defaulting to None) matplotlib figure
    to plot on, and "conditions", the conditions selected for
    plotting.  It should not return anything, but it should draw the
    figure on "fig".

    Because sometimes the model is run in very high resolution,
    `data_dt` allows you to set the bin width for `sample`.

    For performance purposes, Paranoid Scientist verification is
    disabled when running this function.  Enable it by setting the
    `verify` argument to True.

    Some of this code is taken from `fit_model`.
    """
    assert _gui_compatible == True, "Due to an OSX bug in matplotlib," \
        " matplotlib's backend must be explicitly set to TkAgg. To avoid" \
        " this, please import ddm.plot BEFORE matplotlib.pyplot."
    # Make sure either a sample or conditions are specified.
    assert not model.required_conditions or (sample or conditions), \
        "If a sample is not passed, conditions must be passed through the 'conditions' argument."
    # Disable paranoid for this
    paranoid_state = paranoid_settings.get('enabled')
    if paranoid_state and not verify:
        paranoid_settings.set(enabled=False)
    # Loop through the different components of the model and get the
    # parameters that are fittable.  Save the "Fittable" objects in
    # "params".  Since the name is not saved in the parameter object,
    # save them in a list of the same size called "paramnames".  (We
    # can't use a dictionary because some parameters have the same
    # name.)  Create a list of functions to set the value of these
    # parameters, named "setters".
    if model:
        components_list = [model.get_dependence("drift"),
                           model.get_dependence("noise"),
                           model.get_dependence("bound"),
                           model.get_dependence("IC"),
                           model.get_dependence("overlay")]
        # All of the conditions required by at least one of the model
        # components.
        required_conditions = list(set([x for l in components_list for x in l.required_conditions]))
        if sample:
            sample_condition_values = {cond: sample.condition_values(cond) for cond in required_conditions}
        else:
            assert all(c in conditions.keys() for c in required_conditions), \
                "Please pass all conditions needed by the model in the 'conditions' argument."
            # Sort the allowed values when a list of them is given.
            sample_condition_values = {c : (list(sorted(conditions[c])) if isinstance(conditions[c], list) else conditions[c]) for c in required_conditions}
    elif sample:
        components_list = []
        required_conditions = sample.condition_names()
        sample_condition_values = {cond: sample.condition_values(cond) for cond in required_conditions}
    else:
        print("Must define model, sample, or both")
        return
    
    params = [] # A list of all of the Fittables that were passed.
    setters = [] # A list of functions which set the value of the corresponding parameter in `params`
    paramnames = [] # The names of the parameters
    for component in components_list:
        for param_name in component.required_parameters: # For each parameter in the model
            pv = getattr(component, param_name) # Parameter value in the object
            if isinstance(pv, Fittable): # If this was fit (or can be fit) via optimization
                # Create a function which sets each parameter in the
                # list to some value `a` for model `x`.  Note the
                # default arguments to the function are necessary here
                # to preserve scope.  Without them, these variables
                # would be interpreted in the local scope, so they
                # would be equal to the last value encountered in the
                # loop.
                def setter(x,a,pv=pv,component=component,param_name=param_name):
                    if not isinstance(a, Fittable):
                        a = pv.make_fitted(a)
                    setattr(x.get_dependence(component.depname), param_name, a)
                    # Return the fitted instance so we can chain it.
                    # This way, if the same Fittable object is passed,
                    # the same Fitted object will be in both places in
                    # the solution.
                    return a 
                
                # If we have the same Fittable object in two different
                # components inside the model, we only want the
                # Fittable object in the list "params" once, but we
                # want the setter to update both.  We use 'id' because
                # we only want this to be the case with an identical
                # parameter object, not just an identical name/value.
                if id(pv) in map(id, params):
                    pind = list(map(id, params)).index(id(pv))
                    oldsetter = setters[pind]
                    # This is a hack way of executing two functions in
                    # a single function call while passing forward the
                    # same argument object (not just the same argument
                    # value)
                    newsetter = lambda x,a,setter=setter,oldsetter=oldsetter : oldsetter(x,setter(x,a))
                    setters[pind] = newsetter
                    paramnames[pind] += "/"+param_name # "/" for cosmetics for multiple parameters
                else: # This setter is unique (so far)
                    params.append(pv)
                    setters.append(setter)
                    paramnames.append(param_name)
    # Since we don't want to modify the original model, duplicate it,
    # and then use that base model in the optimization routine.  (We
    # can't duplicate it earlier in this function or else duplicated
    # parameters will have separate setters since they will no
    # longer have the same id.)
    m = copy.deepcopy(model) if model else None
    
    # Grid of the Fittables, replacing them with the default values.
    x_0 = [] # Default parameter values
    for p,s in zip(params, setters):
        # Save the default
        default = p.default()
        x_0.append(default)
        # Set the default
        s(m, default)
    
    # Initialize the TK (tkinter) subsystem.
    root = tk.Tk()    
    root.wm_title("Model: %s" % m.name if m else "Data")
    root.grid_columnconfigure(1, weight=0)
    root.grid_columnconfigure(2, weight=2)
    root.grid_columnconfigure(3, weight=1)
    root.grid_columnconfigure(4, weight=0)
    root.grid_rowconfigure(0, weight=1)
    
    # Creates a widget for a matplotlib figure.  Anything drawn to
    # this figure can be displayed by calling canvas.draw().
    fig = Figure()
    canvas = FigureCanvasTkAgg(fig, master=root)
    canvas.get_tk_widget().grid(row=0, column=2, sticky="nswe")
    fig.text(.5, .5, "Loading...")
    canvas.draw()
    
    def update():
        """Redraws the plot according to the current parameters of the model
        and the selected conditions."""
        print("cond var vals", condition_vars_values)
        print("cond vars", condition_vars)
        current_conditions = {c : condition_vars_values[i][condition_vars[i].get()] for i,c in enumerate(required_conditions) if condition_vars[i].get() != "All"}
        # If any conditions were "all", they will not be in current
        # conditions.  Here, we update current_conditions with any
        # conditions which were specified in the conditions argument,
        # implying they are not in the sample.
        if conditions is not None:
            for k,v in conditions.items():
                if k not in current_conditions.keys():
                    current_conditions[k] = v
        fig.clear()
        # If there was an error, display it instead of a plot
        try:
            plot(model=m, fig=fig, sample=sample, conditions=current_conditions, data_dt=data_dt)
        except:
            fig.clear()
            fig.text(0, 1, traceback.format_exc(), horizontalalignment="left", verticalalignment="top")
            canvas.draw()
            raise
        canvas.draw()
    
    def value_changed():
        """Calls update() if the real time checkbox is checked.  Triggers when a value changes on the sliders or the condition radio buttons"""
        if real_time.get() == True:
            update()
    
    # Draw the radio buttons allowing the user to select conditions
    frame_params_container = tk.Canvas(root, bd=2, width=110)
    frame_params_container.grid(row=0, column=0, sticky="nesw")
    scrollbar_params = tk.Scrollbar(root, command=frame_params_container.yview)
    scrollbar_params.grid(row=0, column=1, sticky="ns")
    frame_params_container.configure(yscrollcommand=scrollbar_params.set)

    frame = tk.Frame(master=frame_params_container)
    windowid_params = frame_params_container.create_window((0,0), window=frame, anchor='nw')
    # Get the sizing right
    def adjust_window_params(e, wid=windowid_params, c=frame_params_container):
        c.configure(scrollregion=frame_params_container.bbox('all'))
        c.itemconfig(wid, width=e.width)
    frame_params_container.bind("<Configure>", adjust_window_params)

    
    condition_names = required_conditions
    condition_vars = [] # Tk variables for condition values (set by radio buttons)
    condition_vars_values = [] # Corresponds to the above, but with numerical values instead of strings
    for i,cond in enumerate(condition_names):
        lframe = tk.LabelFrame(master=frame, text=cond)
        lframe.pack(expand=True, anchor=tk.W)
        thisvar = tk.StringVar()
        condition_vars.append(thisvar)
        b = tk.Radiobutton(master=lframe, text="All", variable=thisvar, value="All", command=value_changed)
        b.pack(anchor=tk.W)
        for cv in sample_condition_values[cond]:
            b = tk.Radiobutton(master=lframe, text=str(cv), variable=thisvar, value=cv, command=value_changed)
            b.pack(anchor=tk.W)
        condition_vars_values.append({str(cv) : cv for cv in sample_condition_values[cond]})
        thisvar.set("All")
    
    # And now create the sliders.  While we're at it, get rid of the
    # Fittables, replacing them with the default values.
    if params: # Make sure there is at least one parameter
        # Allow a scrollbar
        frame_sliders_container = tk.Canvas(root, bd=2, width=200)
        frame_sliders_container.grid(row=0, column=3, sticky="nsew")
        scrollbar = tk.Scrollbar(root, command=frame_sliders_container.yview)
        scrollbar.grid(row=0, column=4, sticky="ns")
        frame_sliders_container.configure(yscrollcommand=scrollbar.set)
        
        # Construct the region with sliders
        frame_sliders = tk.LabelFrame(master=frame_sliders_container, text="Parameters")
        windowid = frame_sliders_container.create_window((0,0), window=frame_sliders, anchor='nw')
        # Get the sizing right
        def adjust_window(e, wid=windowid, c=frame_sliders_container):
            c.configure(scrollregion=frame_sliders_container.bbox('all'))
            c.itemconfig(wid, width=e.width)
        frame_sliders_container.bind("<Configure>", adjust_window)
    widgets = [] # To set the value programmatically in, e.g., set_defaults
    # Include x_0 in the zip so that each slider is initialized to its own
    # saved default, not to whatever `default` was left over from the loop above.
    for p,s,name,default in zip(params, setters, paramnames, x_0):
        # Calculate slider constraints
        minval = p.minval if p.minval > -np.inf else None
        maxval = p.maxval if p.maxval < np.inf else None
        # Compare against None explicitly so that a bound of 0 is not treated as "no bound".
        slidestep = (maxval-minval)/200 if maxval is not None and minval is not None else .01
        # Function for the slider change.  A hack to execute both the
        # value changed function and set the value in the model.
        onchange = lambda x,s=s : [s(m, float(x)), value_changed()]
        # Create the slider and set its value
        slider = tk.Scale(master=frame_sliders, label=name, from_=minval, to=maxval, resolution=slidestep, orient=tk.HORIZONTAL, command=onchange)
        slider.set(default)
        slider.pack(expand=True, fill="both")
        widgets.append(slider)
        
    def set_defaults():
        """Set default slider (model parameter) values"""
        for w,default,s in zip(widgets,x_0,setters):
            w.set(default)
            s(m, default)
        update()
    
    # Draw the buttons and the real-time checkbox
    real_time = tk.IntVar()
    c = tk.Checkbutton(master=frame, text="Real-time", variable=real_time)
    c.pack(expand=True, fill="both")
    b = tk.Button(master=frame, text="Update", command=update)
    b.pack(expand=True, fill="both")
    b = tk.Button(master=frame, text="Reset", command=set_defaults)
    b.pack(expand=True, fill="both")
    
    root.update()
    set_defaults()
    frame_params_container.configure(scrollregion=frame_params_container.bbox('all'))
    tk.mainloop()
    # Re-enable paranoid
    if paranoid_state and not verify:
        paranoid_settings.set(enabled=True)
    return m
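
A minimal launch sketch (hedged: `model` and `my_sample` as in the previous sketch; note the import-order requirement enforced by the assertion at the top of the function):

    import ddm.plot  # import BEFORE matplotlib.pyplot so TkAgg is selected
    from ddm.plot import model_gui

    model_gui(model, sample=my_sample)
    # Without a sample, supply the needed condition values directly:
    model_gui(model, conditions={"reward": [1, 3]})
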
Example #10
    def test_no_invalid_value(self):
        """Validation of settings values"""
        fails(lambda: Settings._set("enabled", 3))
        fails(lambda: Settings._set("max_cache", 3.1))
        fails(lambda: Settings._set("max_runtime", True))