Example 1
    def Calculate(self, varsByCalc, params=None):
        """
        Calculate model predictions for everything in varsByCalc.

        varsByCalc is a dictionary of the form:
            dict[calc name][dep var] = ind var

        The return dictionary is of the form:
            dict[calc name][dep var][ind var] = result
        """
        if params is not None:
            self.params.update(params)

        results = {}

        calcs_to_do = list(varsByCalc.keys())
        # Record which calculation each node is doing
        calc_assigned = {}
        while calcs_to_do:
            # The number of calculations to do this round. We want to use
            #  all the processors if possible.
            len_this_block = min(SloppyCell.num_procs, len(calcs_to_do))

            for worker in range(1, len_this_block):
                calc = calcs_to_do.pop()
                calc_assigned[worker] = calc
                logger.debug('Assigning calculation %s to worker %i.' %
                             (calc, worker))
                command = 'Network.calculate(net, vars, params)'
                args = {
                    'net': self.get(calc),
                    'vars': varsByCalc[calc],
                    'params': self.params
                }
                comm.send((command, args), dest=worker)

            # The master does its share of the work here.
            calc = calcs_to_do.pop()
            # We use the finally statement because we want to ensure that we
            #  *always* wait for replies from the workers, even if the master
            #  encounters an exception in its own evaluation.
            try:
                results[calc] = self.get(calc).calculate(
                    varsByCalc[calc], self.params)
            finally:
                # Collect results from the workers
                for worker in range(1, len_this_block):
                    logger.debug('Receiving result from worker %i.' % worker)
                    results[calc_assigned[worker]] = comm.recv(source=worker)
                # If the master encountered an exception, we'll break out of
                #  the function ***here***.

            # Check the results we received. If any is a SloppyCellException,
            #  reraise it.
            for val in results.values():
                if isinstance(val, Utility.SloppyCellException):
                    raise val

        return results
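As a quick reference for the dictionary shapes described in the docstring above, a hypothetical call sketch; the calculation and variable names are made up.

# Hypothetical usage sketch; 'net1' and 'protein_A' are illustrative names.
# varsByCalc maps each calculation to the dependent variables we want,
#  evaluated at the listed independent-variable (e.g. time) points.
varsByCalc = {'net1': {'protein_A': [0.0, 10.0, 20.0]}}

# results = model.Calculate(varsByCalc)   # `model` is assumed to be an
#                                         #  instance of the class above
# results['net1']['protein_A'][10.0] would then hold the prediction for
#  protein_A at time 10.0.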
Example 2
def ensemble_trajs(net, times, ensemble):
    """
    Return a list of trajectories evaluated at times for all parameter sets
    in ensemble.
    """
    elems_assigned = [ensemble[node::num_procs] for node in range(num_procs)]
    for worker in range(1, num_procs):
        command = 'Ensembles.few_ensemble_trajs(net, times, elements)'
        args = {'net': net, 'times': times, 'elements': elems_assigned[worker]}
        comm.send((command, args), dest=worker)

    traj_set = few_ensemble_trajs(net, times, elems_assigned[0])

    for worker in range(1, num_procs):
        traj_set.extend(comm.recv(source=worker))

    return traj_set
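The `ensemble[node::num_procs]` slice above deals parameter sets out to the nodes round-robin; a small standalone illustration with made-up values:

# Standalone illustration of the round-robin split used above (made-up data).
num_procs = 3
ensemble = ['p0', 'p1', 'p2', 'p3', 'p4', 'p5', 'p6']
elems_assigned = [ensemble[node::num_procs] for node in range(num_procs)]
# elems_assigned == [['p0', 'p3', 'p6'], ['p1', 'p4'], ['p2', 'p5']]
# The master (node 0) handles the first sublist and the workers handle the
#  rest, so the returned traj_set is grouped by node, not in ensemble order.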
Example 3
import sys

import SloppyCell.Utility as Utility


class Statement(object):
    """
    Class for sending Python statements to workers.
    """
    def __init__(self, statement, locals=None):
        self.statement = statement
        self.locals = locals if locals is not None else {}


while my_rank != 0:
    # Wait for a message
    message = comm.recv(source=0)

    # If the message is a SystemExit exception, exit the code.
    if isinstance(message, SystemExit):
        sys.exit()

    # Exception handling:
    #    If we catch a SloppyCellException during an eval(), it's probably just
    #      a numerical issue so we just pass it back to the master to deal with.
    #      Note that we don't catch SloppyCellExceptions for exec'd things.
    #      This is because exec'd things shouldn't return anything, thus the
    #      master won't be waiting for a reply.
    #    If we catch any other exception, it's probably a bug in the code. Print
    #      a nice traceback, save results, and exit the code.
    try:
        if isinstance(message, Statement):
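            # --- The listing is truncated at this point in the original. ---
            # What follows is an assumed, minimal completion of the dispatch
            #  loop, not the verbatim SloppyCell code; `comm` and `my_rank`
            #  are assumed to come from the module's MPI setup.
            # Statements are exec'd for their side effects (e.g. imports or
            #  assignments on the worker); nothing is sent back to the master.
            globals().update(message.locals)
            exec(message.statement, globals())
        else:
            # Otherwise the message is a (command, args) pair: eval the command
            #  with args injected into the namespace and send the result back.
            command, args = message
            globals().update(args)
            comm.send(eval(command, globals()), dest=0)
    except Utility.SloppyCellException as X:
        # Probably just a numerical problem; pass it back to the master.
        comm.send(X, dest=0)
    except Exception:
        # Anything else is probably a bug in the code: print a traceback and
        #  stop this worker (the original also saves results before exiting).
        import traceback
        traceback.print_exc()
        sys.exit(1)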
Example 4
def integrate_sensitivity(net, times, params=None, rtol=None, 
                          fill_traj=False, return_derivs=False,
                          redirect_msgs=True):
    logger.debug('Entering integrate_sens on node %i' % my_rank)
    times = np.array(times)
    net.compile()
    if times[0] == 0:
        net.resetDynamicVariables()

    if params is not None:
        net.update_optimizable_vars(params)

    # Assign optimizable variables to each node, round-robin.
    opt_keys = list(net.optimizableVars.keys())
    vars_assigned = {node: opt_keys[node::num_procs]
                     for node in range(num_procs)}

    # Send out jobs for workers
    for worker in range(1, num_procs):
        logger.debug('Sending to worker %i: %s' % (worker, 
                                                   str(vars_assigned[worker])))
        command = 'Dynamics.integrate_sens_subset(net, times,'\
                'rtol, fill_traj, opt_vars, return_derivs, redirect_msgs=redir)'
        args = {'net':net, 'times':times, 'rtol':rtol, 'fill_traj':fill_traj,
                'opt_vars':vars_assigned[worker], 'return_derivs':return_derivs,
                'redir': redirect_msgs}
        comm.send((command, args), dest=worker)

    logger.debug('Master doing vars %s' % str(vars_assigned[0]))
    try:
        result = integrate_sens_subset(net, times, rtol, fill_traj, 
                                       vars_assigned[0], return_derivs,
                                       redirect_msgs=redirect_msgs)
    except Utility.SloppyCellException:
        # If the master encounters an exception, we must still wait and get 
        #  replies from all the workers (even if we do nothing with them) so 
        #  that communication stays synchronized.
        for worker in range(1, num_procs):
            comm.recv(source=worker)
        raise

    # Begin pulling results together...
    tout = result[0]
    # Build the yout array
    n_dyn, n_opt = len(net.dynamicVars), len(net.optimizableVars)
    yout = np.zeros((len(tout), n_dyn * (n_opt + 1)), float)

    # We use the master's result for the non-sensitivity values
    yout[:, :n_dyn] = result[1][:, :n_dyn]
    if return_derivs:
        youtdt = np.zeros((len(tout), n_dyn * (n_opt + 1)), float)
        youtdt[:, :n_dyn] = result[2][:, :n_dyn]
    else:
        youtdt = None

    # We use the master's result for events that occurred
    event_info = result[-2]
    events_occurred = result[-1]

    # Copy the sensitivity results into yout and (if necessary) youtdt
    # We don't need events_occurred here, because we already have it.
    _parse_sens_result(result, net, vars_assigned[0], yout, youtdt)

    # Now, as we listen to the workers' replies, we store any exception they
    #  return in exception_raised. We'll reraise that exception after getting
    #  replies from all the workers.
    exception_raised = None
    
    for worker in range(1, num_procs):
        logger.debug('Receiving result from worker %i.' % worker)
        result = comm.recv(source=worker)
        if isinstance(result, Utility.SloppyCellException):
            exception_raised = result
            continue
        if vars_assigned[worker]:
            _parse_sens_result(result, net, vars_assigned[worker], yout, youtdt,
                               events_occurred)

    if exception_raised:
        raise exception_raised
    
    ddv_dpTrajectory = Trajectory_mod.Trajectory(net, is_sens=True, 
                                                 holds_dt=return_derivs)
    if return_derivs:
        yout = np.concatenate((yout, youtdt), axis=1)
    ddv_dpTrajectory.appendSensFromODEINT(tout, yout, holds_dt=return_derivs)
    ddv_dpTrajectory.events_occurred = events_occurred
    ddv_dpTrajectory.event_info = event_info

    net.trajectory = ddv_dpTrajectory

    return ddv_dpTrajectory
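The bookkeeping above (drain one reply per worker when the master fails, and defer worker exceptions until every reply is in) is what keeps master and workers synchronized. A generic, standalone sketch of that pattern; `do_local_work` and the function name are placeholders, not SloppyCell API:

# Generic sketch of the "drain replies before re-raising" pattern used above.
def gather_with_deferred_errors(comm, num_procs, do_local_work):
    try:
        local = do_local_work()
    except Exception:
        # Still receive one message per worker so the next round of sends
        #  doesn't find stale replies sitting in the queue.
        for worker in range(1, num_procs):
            comm.recv(source=worker)
        raise

    deferred = None
    replies = [local]
    for worker in range(1, num_procs):
        reply = comm.recv(source=worker)
        if isinstance(reply, Exception):
            deferred = reply          # remember it, but keep receiving
            continue
        replies.append(reply)
    if deferred is not None:
        raise deferred
    return replies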
Example 5
def hessian_log_params(sens_traj, data_ids=None, opt_ids=None, 
                       fixed_sf=False, return_dict=False, 
                       uncert_func=typ_val_uncert(1.0, 1e-14)):
    """
    Calculate the "perfect data" hessian in log parameters given a sensitivity
    trajectory.

    sens_traj   Sensitivity trajectory of Network being considered.
    data_ids    A sequence of variable ids to assume we have data for. If
                data_ids is None, all dynamic and assigned variables will be
                used.
    opt_ids     A sequence of parameter ids to calculate derivatives with
                respect to. The hessian is (len(opt_ids) x len(opt_ids)).
                If opt_ids is None, all optimizable variables are considered.
    fixed_sf    If True, calculate the hessian assuming fixed scale factors.
    return_dict If True, the returned values are (hess, hess_dict). hess_dict
                is a dictionary keyed on the elements of data_ids; each
                corresponding value is the hessian assuming data only on a
                single variable. hess is the sum of all these hessians.
    uncert_func Function that takes in a trajectory and a variable id and
                returns what uncertainty should be assumed for that variable,
                either as a scalar or a list the same length as the trajectory.
    """
    if data_ids is None:
        data_ids = sens_traj.dynamicVarKeys + sens_traj.assignedVarKeys
    if opt_ids is None:
        opt_ids = sens_traj.optimizableVarKeys

    data_sigmas = {}
    for data_id in data_ids:
        ds = uncert_func(sens_traj, data_id)
        if np.isscalar(ds):
            ds = np.zeros(len(sens_traj), float) + ds
        data_sigmas[data_id] = ds

    vars_assigned = [data_ids[node::num_procs] for node in range(num_procs)]
    for worker in range(1, num_procs):
        logger.debug('Sending to worker %i.' % worker)
        # Reduce the amount we have to pickle.
        # The only things the worker needs in the sens_traj are those that
        #  refer to the data_ids it has to deal with.
        vars_needed = set(sens_traj.optimizableVarKeys)
        vars_needed.update(vars_assigned[worker])
        for var in vars_assigned[worker]:
            vars_needed.update([(var, ov) for ov in opt_ids])
        worker_traj = sens_traj.copy_subset(vars_needed)
        # And the only uncertainties it needs have to do with those data_ids
        worker_ds = dict([(var, data_sigmas[var])
                          for var in vars_assigned[worker]])
        command = 'PerfectData.compute_sf_LMHessian_conts(sens_traj, data_ids,'\
                'data_sigmas, opt_ids, fixed_sf)'
        args = {'sens_traj': worker_traj, 'data_ids': vars_assigned[worker], 
                'data_sigmas': worker_ds, 'opt_ids': opt_ids,
                'fixed_sf': fixed_sf}
        comm.send((command, args), dest=worker)

    hess_dict = compute_sf_LMHessian_conts(sens_traj, vars_assigned[0],
                                           data_sigmas, opt_ids, fixed_sf)

    for worker in range(1, num_procs):
        logger.debug('Receiving from worker %i.' % worker)
        hess_dict.update(comm.recv(source=worker))

    hess = np.sum(list(hess_dict.values()), axis=0)
    if return_dict:
        return hess, hess_dict
    else:
        return hess
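A hypothetical call sketch, assuming a sensitivity trajectory `sens_traj` has already been computed; the variable ids and the 10%-of-maximum uncertainty rule are illustrative only.

# Hypothetical usage sketch; the ids and the uncertainty rule are made up.
def ten_percent_uncert(traj, data_id):
    # Assume the uncertainty is 10% of the largest value the variable reaches
    #  (get_var_traj is assumed to return that variable's time course).
    return 0.1 * max(abs(traj.get_var_traj(data_id)))

hess, hess_dict = hessian_log_params(sens_traj,
                                     data_ids=['protein_A', 'protein_B'],
                                     return_dict=True,
                                     uncert_func=ten_percent_uncert)
# hess_dict['protein_A'] is the hessian contribution from data on protein_A
#  alone; hess is the sum over all data_ids.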