Example No. 1
def get_figure_and_axes(fig, ax):
    # Resolve the figure: call it if a factory was passed, create a new figure if None.
    if checks.is_callable(fig): fig = fig()
    elif fig is None: fig = plt.figure()

    # Resolve the axes: call the factory with the figure, or add a default subplot.
    if checks.is_callable(ax): ax = ax(fig)
    elif ax is None: ax = fig.add_subplot(111)

    return fig, ax
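
For orientation, here is a minimal standalone sketch of the same figure/axes resolution pattern, using only matplotlib and the built-in callable in place of checks.is_callable; the name resolve_figure_and_axes is illustrative, not from the library.

import matplotlib.pyplot as plt

def resolve_figure_and_axes(fig=None, ax=None):
    # Accept a ready-made figure/axes, a factory callable, or None for sensible defaults.
    if callable(fig): fig = fig()
    elif fig is None: fig = plt.figure()
    if callable(ax): ax = ax(fig)
    elif ax is None: ax = fig.add_subplot(111)
    return fig, ax

fig, ax = resolve_figure_and_axes()                                       # defaults
fig2, ax2 = resolve_figure_and_axes(ax=lambda f: f.add_subplot(1, 2, 1))  # axes factory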
Example No. 2
def xtimes(start, stop=None, step=None):
    # Generalised range generator: yields start, start + step, ... until stop is reached
    # (stop is exclusive); stop=None gives an infinite sequence. Works with numbers, dates,
    # times, datetimes, and with a callable step that is re-evaluated at each point.
    checks.check_not_none(start)
    
    if step is None:
        if isinstance(start, (dt.date, dt.time, dt.datetime)) or isinstance(stop, (dt.date, dt.time, dt.datetime)):
            step = dt.timedelta(days=1)
        elif isinstance(start, float) or isinstance(stop, float):
            step = 1.
        else:
            step = 1

    resultwrap = lambda x: x

    if isinstance(start, dt.time):
        start = dt.datetime.combine(dt.datetime(1,1,1,0,0,0), start)
        resultwrap = lambda x: x.time()
    if isinstance(stop, dt.time):
        stop = dt.datetime.combine(dt.datetime(1,1,1,0,0,0), stop)
        resultwrap = lambda x: x.time()

    stepfunc = step if checks.is_callable(step) else lambda x: step
    s = stepfunc(start)
    checks.check(npu.sign(s) != 0, 'Step must be positive or negative, not zero')
    
    if stop is None:
        while True:
            yield resultwrap(start)
            start += s
            s = stepfunc(start)
    else:
        while npu.sign(start - stop) == -npu.sign(s):
            yield resultwrap(start)
            start += s
            s = stepfunc(start)
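
For a sense of the intended behaviour, here is a standard-library-only sketch of the core stepping logic over dates; it is a simplified stand-in, without the checks/npu helpers, the callable step, or the time handling above.

import datetime as dt

def date_range(start, stop, step=dt.timedelta(days=1)):
    # Yield start, start + step, ... while stop has not been reached (stop is exclusive).
    current = start
    while (current < stop) if step > dt.timedelta(0) else (current > stop):
        yield current
        current += step

list(date_range(dt.date(2024, 1, 1), dt.date(2024, 1, 4)))
# -> [datetime.date(2024, 1, 1), datetime.date(2024, 1, 2), datetime.date(2024, 1, 3)]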
Example No. 3
def visualise_sized_point_series(time_ser,
                                 value_ser,
                                 size_ser,
                                 scaling=None,
                                 fig=None,
                                 ax=None,
                                 **kwargs):
    if 'alpha' not in kwargs: kwargs['alpha'] = .3

    if scaling is None: scaling = lambda x: x
    elif not checks.is_callable(scaling):
        # Capture the numeric factor before rebinding the name; otherwise the new
        # lambda would refer to itself and fail when called.
        scaling_factor = scaling
        scaling = lambda x: scaling_factor * x

    if time_ser is None: time_ser = value_ser.index
    time_ser, value_ser, size_ser = [
        conv.to_plottable_value(x) for x in (time_ser, value_ser, size_ser)
    ]

    fig, ax = get_figure_and_axes(fig, ax)
    if len(value_ser) > 0:
        ax.scatter(time_ser,
                   value_ser,
                   s=[scaling(x) for x in size_ser],
                   **kwargs)
        rotate_xticklabels(ax)

    return fig, ax
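
A matplotlib-only usage sketch of the same sized-point scatter, with plain Python sequences standing in for the library's conversion helpers:

import datetime as dt
import matplotlib.pyplot as plt

times = [dt.date(2024, 1, d) for d in (1, 2, 3, 4)]
values = [1.0, 2.5, 1.8, 3.2]
sizes = [10., 40., 25., 60.]

fig, ax = plt.subplots()
ax.scatter(times, values, s=[2. * s for s in sizes], alpha=.3)  # s sets the marker area
for label in ax.get_xticklabels():
    label.set_rotation(45)  # stand-in for rotate_xticklabels(ax)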
Example No. 4
def run(observable,
        obss=None,
        times=None,
        obs_covs=None,
        true_values=None,
        df=None,
        fun=None,
        return_df=False):
    if df is not None:
        if obss is not None and (checks.is_string(obss)
                                 or checks.is_int(obss)):
            obss = df[obss]

        if times is None:
            if isinstance(obss, pd.Series): times = obss.index.values
        elif (checks.is_string(times) or checks.is_int(times)):
            times = df[times].values

        if isinstance(obss, pd.Series): obss = obss.values

        if obs_covs is not None and (checks.is_string(obs_covs)
                                     or checks.is_int(obs_covs)):
            obs_covs = df[obs_covs].values

        if true_values is not None and (checks.is_string(true_values)
                                        or checks.is_int(true_values)):
            true_values = df[true_values].values

    checks.check_not_none(obss)

    # Broadcast any scalar (non-iterable) argument to a constant iterable so that
    # everything can be zipped together below.
    if not checks.is_iterable_not_string(observable):
        observable = utils.xconst(observable)
    if not checks.is_iterable_not_string(obss): obss = [obss]
    if not checks.is_iterable_not_string(times): times = utils.xconst(times)
    if not checks.is_iterable_not_string(obs_covs):
        obs_covs = utils.xconst(obs_covs)
    if not checks.is_iterable_not_string(true_values):
        true_values = utils.xconst(true_values)

    obs_result = None
    cumulative_log_likelihood = 0.

    if return_df:
        time = []
        filter_name = []
        filter_type = []
        observable_name = []
        accepted = []
        obs_mean = []
        obs_cov = []
        predicted_obs_mean = []
        predicted_obs_cov = []
        cross_cov = []
        innov_mean = []
        innov_cov = []
        prior_state_mean = []
        prior_state_cov = []
        posterior_state_mean = []
        posterior_state_cov = []
        true_value = []
        log_likelihood = []
        gain = []

    last_time = None

    for an_observable, an_obs, a_time, an_obs_cov, a_true_value in zip(
            observable, obss, times, obs_covs, true_values):
        if a_time is None:
            if last_time is None: a_time = 0
            else: a_time = last_time + 1
        last_time = a_time

        if checks.is_callable(an_observable):
            an_observable = an_observable(an_obs)
        if fun is not None: an_obs = fun(an_obs)
        if an_obs_cov is not None:
            if isinstance(an_obs, (Obs, distrs.Distr)):
                raise ValueError(
                    'An observation covariance is provided while the observation is given by a distribution --- conflicting arguments'
                )
            an_obs = distrs.NormalDistr(an_obs, an_obs_cov)

        # Before processing the first observation, record the filter's initial state as the first row.
        if return_df and len(time) == 0:
            an_initial_state_mean = an_observable.filter.state.state_distr.mean
            an_initial_state_cov = an_observable.filter.state.state_distr.cov
            time.append(an_observable.filter.time)
            filter_name.append(an_observable.filter.name)
            filter_type.append(type(an_observable.filter))
            observable_name.append(None)
            accepted.append(None)
            obs_mean.append(None)
            obs_cov.append(None)
            predicted_obs_mean.append(None)
            predicted_obs_cov.append(None)
            cross_cov.append(None)
            innov_mean.append(None)
            innov_cov.append(None)
            prior_state_mean.append(
                npu.to_scalar(an_initial_state_mean, raise_value_error=False))
            prior_state_cov.append(
                npu.to_scalar(an_initial_state_cov, raise_value_error=False))
            posterior_state_mean.append(
                npu.to_scalar(an_initial_state_mean, raise_value_error=False))
            posterior_state_cov.append(
                npu.to_scalar(an_initial_state_cov, raise_value_error=False))
            true_value.append(None)
            log_likelihood.append(None)
            gain.append(None)

        if isinstance(an_obs, Obs):
            a_time, _ = _time_and_obs_distr(an_obs, a_time,
                                            an_observable.filter.time)

        predicted_obs = an_observable.predict(time=a_time,
                                              true_value=a_true_value)
        a_prior_state_mean = an_observable.filter.state.state_distr.mean
        a_prior_state_cov = an_observable.filter.state.state_distr.cov
        obs_result = an_observable.observe(obs=an_obs,
                                           time=a_time,
                                           true_value=a_true_value,
                                           predicted_obs=predicted_obs)
        if obs_result.accepted:
            cumulative_log_likelihood += obs_result.log_likelihood
        a_posterior_state_mean = an_observable.filter.state.state_distr.mean
        a_posterior_state_cov = an_observable.filter.state.state_distr.cov

        if return_df:
            time.append(obs_result.obs.time)
            filter_name.append(an_observable.filter.name)
            filter_type.append(type(an_observable.filter))
            observable_name.append(an_observable.name)
            accepted.append(obs_result.accepted)
            obs_mean.append(
                npu.to_scalar(obs_result.obs.distr.mean,
                              raise_value_error=False))
            obs_cov.append(
                npu.to_scalar(obs_result.obs.distr.cov,
                              raise_value_error=False))
            predicted_obs_mean.append(
                npu.to_scalar(obs_result.predicted_obs.distr.mean,
                              raise_value_error=False))
            predicted_obs_cov.append(
                npu.to_scalar(obs_result.predicted_obs.distr.cov,
                              raise_value_error=False))
            cross_cov.append(
                npu.to_scalar(obs_result.predicted_obs.cross_cov,
                              raise_value_error=False))
            innov_mean.append(
                npu.to_scalar(obs_result.innov_distr.mean,
                              raise_value_error=False))
            innov_cov.append(
                npu.to_scalar(obs_result.innov_distr.cov,
                              raise_value_error=False))
            prior_state_mean.append(
                npu.to_scalar(a_prior_state_mean, raise_value_error=False))
            prior_state_cov.append(
                npu.to_scalar(a_prior_state_cov, raise_value_error=False))
            posterior_state_mean.append(
                npu.to_scalar(a_posterior_state_mean, raise_value_error=False))
            posterior_state_cov.append(
                npu.to_scalar(a_posterior_state_cov, raise_value_error=False))
            true_value.append(
                npu.to_scalar(a_true_value, raise_value_error=False))
            log_likelihood.append(
                npu.to_scalar(obs_result.log_likelihood,
                              raise_value_error=False))
            gain.append(
                obs_result.gain if hasattr(obs_result, 'gain') else None)

    df = None
    if return_df:
        df = pd.DataFrame(
            {
                'time': time,
                'filter_name': filter_name,
                'filter_type': filter_type,
                'observable_name': observable_name,
                'accepted': accepted,
                'obs_mean': obs_mean,
                'obs_cov': obs_cov,
                'predicted_obs_mean': predicted_obs_mean,
                'predicted_obs_cov': predicted_obs_cov,
                'cross_cov': cross_cov,
                'innov_mean': innov_mean,
                'innov_cov': innov_cov,
                'prior_state_mean': prior_state_mean,
                'prior_state_cov': prior_state_cov,
                'posterior_state_mean': posterior_state_mean,
                'posterior_state_cov': posterior_state_cov,
                'true_value': true_value,
                'log_likelihood': log_likelihood,
                'gain': gain
            },
            columns=('time', 'filter_name', 'filter_type', 'observable_name',
                     'accepted', 'obs_mean', 'obs_cov', 'predicted_obs_mean',
                     'predicted_obs_cov', 'cross_cov', 'innov_mean',
                     'innov_cov', 'prior_state_mean', 'prior_state_cov',
                     'posterior_state_mean', 'posterior_state_cov',
                     'true_value', 'log_likelihood', 'gain'))

    return FilterRunResult(obs_result, cumulative_log_likelihood, df)
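
The broadcast-and-collect pattern above (observations consumed one at a time, per-step quantities appended to column lists and optionally returned as a DataFrame) can be illustrated with a toy scalar Kalman-style update; toy_run is a hypothetical stand-in, not the library's filter.

import itertools
import pandas as pd

def toy_run(obss, obs_var=1.0, prior_mean=0.0, prior_var=1.0, return_df=False):
    # Broadcast a scalar observation variance across all observations, much like utils.xconst.
    obs_vars = obs_var if hasattr(obs_var, '__iter__') else itertools.repeat(obs_var)
    rows = []
    mean, var = prior_mean, prior_var
    for t, (obs, v) in enumerate(zip(obss, obs_vars)):
        gain = var / (var + v)                    # scalar Kalman gain
        posterior_mean = mean + gain * (obs - mean)
        posterior_var = (1. - gain) * var
        rows.append({'time': t,
                     'prior_state_mean': mean, 'prior_state_cov': var,
                     'posterior_state_mean': posterior_mean, 'posterior_state_cov': posterior_var,
                     'gain': gain})
        mean, var = posterior_mean, posterior_var
    return pd.DataFrame(rows) if return_df else (mean, var)

toy_run([1.2, 0.9, 1.1], return_df=True)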
Example No. 5
def grid_search(func,
                param_ranges,
                optimisation_id=None,
                work_id=None,
                call_count=1,
                repeat_count=1,
                evaluator=None,
                pype=None):
    param_ranges = copy.copy(param_ranges)
    if optimisation_id is None: optimisation_id = uuid.uuid4().hex
    if work_id is None: work_id = optimisation_id
    if not checks.is_callable(work_id):
        last_index = 0

        def work_id_callable():
            nonlocal last_index
            last_index += 1
            return last_index

        work_id = work_id_callable

    param_names = list(param_ranges)
    param_value_indices = [0] * len(param_names)

    evaluation_statuses = []

    status = _evaluate(func,
                       param_ranges,
                       param_names,
                       copy.copy(param_value_indices),
                       optimisation_id=optimisation_id,
                       work_id=work_id(),
                       call_count=call_count,
                       repeat_count=repeat_count,
                       evaluator=evaluator,
                       pype=pype)
    evaluation_statuses.append(status)

    # Advance param_value_indices like an odometer (rightmost parameter varies fastest),
    # evaluating func at each new combination until every combination has been visited.
    while True:
        altered_param = False
        for param_index in range(len(param_names) - 1, -1, -1):
            if param_value_indices[param_index] < len(
                    param_ranges[param_names[param_index]]) - 1:
                param_value_indices[param_index] += 1
                altered_param = True
                for param_index1 in range(param_index + 1, len(param_names)):
                    param_value_indices[param_index1] = 0

                status = _evaluate(func,
                                   param_ranges,
                                   param_names,
                                   copy.copy(param_value_indices),
                                   optimisation_id=optimisation_id,
                                   work_id=work_id(),
                                   call_count=call_count,
                                   repeat_count=repeat_count,
                                   evaluator=evaluator,
                                   pype=pype)
                evaluation_statuses.append(status)

                break
        if not altered_param: break

    return GridSearchResult(param_ranges=param_ranges,
                            optimisation_id=optimisation_id,
                            evaluation_statuses=evaluation_statuses)
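
The index-incrementing loop enumerates every combination of parameter values, rightmost parameter varying fastest. A compact standalone sketch of the same enumeration with itertools.product, assuming a plain dict of ranges and a synchronous objective function:

import itertools

def simple_grid_search(func, param_ranges):
    # Evaluate func at each point of the Cartesian product of the parameter ranges.
    param_names = list(param_ranges)
    results = []
    for values in itertools.product(*(param_ranges[name] for name in param_names)):
        params = dict(zip(param_names, values))
        results.append((params, func(**params)))
    return results

simple_grid_search(lambda x, y: x * x + y, {'x': [1, 2, 3], 'y': [0., .5]})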
Example No. 6
def run(observable,
        obss=None,
        times=None,
        obs_covs=None,
        true_values=None,
        df=None,
        fun=None,
        return_df=False):
    if df is not None:
        if obss is not None and checks.is_string(obss):
            obss = df[obss].values
        if times is not None and checks.is_string(times):
            times = df[times].values
        if obs_covs is not None and checks.is_string(obs_covs):
            obs_covs = df[obs_covs].values
        if true_values is not None and checks.is_string(true_values):
            true_values = df[true_values].values

    checks.check_not_none(obss)

    if not checks.is_iterable_not_string(observable):
        observable = utils.xconst(observable)
    if not checks.is_iterable_not_string(obss): obss = [obss]
    if not checks.is_iterable_not_string(times): times = utils.xconst(times)
    if not checks.is_iterable_not_string(obs_covs):
        obs_covs = utils.xconst(obs_covs)
    if not checks.is_iterable_not_string(true_values):
        true_values = utils.xconst(true_values)

    obs_result = None

    if return_df:
        time = []
        accepted = []
        obs_mean = []
        obs_cov = []
        predicted_obs_mean = []
        predicted_obs_cov = []
        innov_mean = []
        innov_cov = []
        prior_state_mean = []
        prior_state_cov = []
        posterior_state_mean = []
        posterior_state_cov = []
        log_likelihood = []

    for an_observable, an_obs, a_time, an_obs_cov, a_true_value in zip(
            observable, obss, times, obs_covs, true_values):
        if checks.is_callable(an_observable):
            an_observable = an_observable(an_obs)
        if fun is not None: an_obs = fun(an_obs)
        if an_obs_cov is not None:
            if isinstance(an_obs, (Obs, distrs.Distr)):
                raise ValueError(
                    'An observation covariance is provided while the observation is given by a distribution --- conflicting arguments'
                )
            an_obs = distrs.NormalDistr(an_obs, an_obs_cov)

        if return_df and len(time) == 0:
            an_initial_state_mean = an_observable.filter.state.state_distr.mean
            an_initial_state_cov = an_observable.filter.state.state_distr.cov
            time.append(an_observable.filter.time)
            accepted.append(None)
            obs_mean.append(None)
            obs_cov.append(None)
            predicted_obs_mean.append(None)
            predicted_obs_cov.append(None)
            innov_mean.append(None)
            innov_cov.append(None)
            prior_state_mean.append(
                npu.to_scalar(an_initial_state_mean, raise_value_error=False))
            prior_state_cov.append(
                npu.to_scalar(an_initial_state_cov, raise_value_error=False))
            posterior_state_mean.append(
                npu.to_scalar(an_initial_state_mean, raise_value_error=False))
            posterior_state_cov.append(
                npu.to_scalar(an_initial_state_cov, raise_value_error=False))
            log_likelihood.append(None)

        if isinstance(an_obs, Obs):
            a_time, _ = _time_and_obs_distr(an_obs, a_time,
                                            an_observable.filter.time)

        predicted_obs = an_observable.predict(time=a_time,
                                              true_value=a_true_value)
        a_prior_state_mean = an_observable.filter.state.state_distr.mean
        a_prior_state_cov = an_observable.filter.state.state_distr.cov
        obs_result = an_observable.observe(obs=an_obs,
                                           time=a_time,
                                           true_value=a_true_value,
                                           predicted_obs=predicted_obs)
        a_posterior_state_mean = an_observable.filter.state.state_distr.mean
        a_posterior_state_cov = an_observable.filter.state.state_distr.cov

        if return_df:
            time.append(obs_result.obs.time)
            accepted.append(obs_result.accepted)
            obs_mean.append(
                npu.to_scalar(obs_result.obs.distr.mean,
                              raise_value_error=False))
            obs_cov.append(
                npu.to_scalar(obs_result.obs.distr.cov,
                              raise_value_error=False))
            predicted_obs_mean.append(
                npu.to_scalar(obs_result.predicted_obs.distr.mean,
                              raise_value_error=False))
            predicted_obs_cov.append(
                npu.to_scalar(obs_result.predicted_obs.distr.cov,
                              raise_value_error=False))
            innov_mean.append(
                npu.to_scalar(obs_result.innov_distr.mean,
                              raise_value_error=False))
            innov_cov.append(
                npu.to_scalar(obs_result.innov_distr.cov,
                              raise_value_error=False))
            prior_state_mean.append(
                npu.to_scalar(a_prior_state_mean, raise_value_error=False))
            prior_state_cov.append(
                npu.to_scalar(a_prior_state_cov, raise_value_error=False))
            posterior_state_mean.append(
                npu.to_scalar(a_posterior_state_mean, raise_value_error=False))
            posterior_state_cov.append(
                npu.to_scalar(a_posterior_state_cov, raise_value_error=False))
            log_likelihood.append(
                npu.to_scalar(obs_result.log_likelihood,
                              raise_value_error=False))

    if return_df:
        return pd.DataFrame(
            {
                'time': time,
                'accepted': accepted,
                'obs_mean': obs_mean,
                'obs_cov': obs_cov,
                'predicted_obs_mean': predicted_obs_mean,
                'predicted_obs_cov': predicted_obs_cov,
                'innov_mean': innov_mean,
                'innov_cov': innov_cov,
                'prior_state_mean': prior_state_mean,
                'prior_state_cov': prior_state_cov,
                'posterior_state_mean': posterior_state_mean,
                'posterior_state_cov': posterior_state_cov,
                'log_likelihood': log_likelihood
            },
            columns=('time', 'accepted', 'obs_mean', 'obs_cov',
                     'predicted_obs_mean', 'predicted_obs_cov', 'innov_mean',
                     'innov_cov', 'prior_state_mean', 'prior_state_cov',
                     'posterior_state_mean', 'posterior_state_cov',
                     'log_likelihood'))

    return obs_result
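
Both run variants pass every array-valued quantity through npu.to_scalar(..., raise_value_error=False) before appending it to a result column. A plausible standalone sketch of that conversion idiom follows; to_scalar_or_value is a hypothetical stand-in, and its fallback behaviour is assumed here rather than taken from the library.

import numpy as np

def to_scalar_or_value(value):
    # Hypothetical stand-in: return a Python scalar for single-element values,
    # otherwise return the value unchanged instead of raising.
    if value is None:
        return None
    arr = np.asarray(value)
    return arr.item() if arr.size == 1 else value

to_scalar_or_value(np.array([[2.5]]))  # -> 2.5
to_scalar_or_value(np.eye(2))          # -> the 2x2 array, unchanged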