Example #1
def v_x_dissonance_sethares(f1,
                            f2,
                            a1=None,
                            a2=None,
                            A=3.5,
                            B=5.75,
                            DSTAR=0.24,
                            S1=0.21,
                            S2=19):
    """
    Sethares' (1998) version of the Plomp and Levelt cross-dissonance.
    Vector version.
    Note that cross-dissonance ignores self-dissonance.
    """
    if a1 is None:
        a1 = np.ones_like(f1)
    if a2 is None:
        a2 = np.ones_like(f2)
    f1 = f1.reshape(1, -1)
    a1 = a1.reshape(1, -1)
    f2 = f2.reshape(-1, 1)
    a2 = a2.reshape(-1, 1)
    df = abs(f2 - f1)
    s = DSTAR / (S1 * np.minimum(f1, f2) + S2)
    sdf = np.minimum(s * df, 20.0)
    return ((a1 * a2) * (np.exp(-A * sdf) - np.exp(-B * sdf))).sum()
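Usage sketch (editorial; not from the original project): build two six-partial harmonic tones and evaluate the cross-dissonance at a few frequency ratios.

import numpy as np

f0 = 440.0
f1 = f0 * np.arange(1, 7)            # partials of the lower tone
a1 = 0.88 ** np.arange(6)            # gently decaying partial amplitudes
for ratio in (1.0, 1.06, 1.5, 2.0):  # unison, ~semitone, fifth, octave
    print(ratio, v_x_dissonance_sethares(f1, ratio * f1, a1, a1))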
Example #2
def TrackVelocity(x, k, vmax, acmax, Ta):
    ''' compute the velocity at each point along the track (given
    already-computed curvatures) assuming a certain acceleration profile '''
    v = np.minimum(np.abs(acmax / k)**0.5, vmax)

    # also compute arc distance between successive points in x given curvature
    # k; for now we'll just use the linear distance though as it's close enough
    s = np.abs(np.concatenate([x[1:] - x[:-1], x[:1] - x[-1:]]))

    va = 0
    T = 0
    vout = []

    # first pass is just to get the initial velocity
    # let's assume it's zero
    # for i in range(1, len(k)):
    #     va = va + (v[i] - va) / Ta

    for i in range(0, len(k)):
        a = (v[i] - va) / Ta  # acceleration
        dt = s[i] / (va + a/2)  # time to reach next waypoint
        va = np.minimum(va + dt * (v[i] - va) / Ta, v[i])
        T += dt
        vout.append(va)
    return np.array(vout), T
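Hypothetical call (not from the original source): the snippet treats x as an array of track points, so with complex coordinates np.abs of a difference is a chord length. On a circular track of radius R, the curvature-limited speed is sqrt(acmax * R):

import numpy as np

R, N = 20.0, 100
theta = np.linspace(0, 2 * np.pi, N, endpoint=False)
x = R * np.exp(1j * theta)      # track points as complex coordinates
k = np.full(N, 1.0 / R)         # constant curvature of a circle
v, T = TrackVelocity(x, k, vmax=10.0, acmax=5.0, Ta=2.0)
print(v[-1], T)                 # speed settles at min(sqrt(acmax * R), vmax)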
Example #4
    def rand_x(self, n=1):
        tmp = np.random.uniform(0, 1, (n))
        idx = (tmp < 0.4)
        x = np.random.uniform(-0.5, 0.5, (self.dim, n))
        x[:, idx] = (0.05 * np.random.uniform(-0.5, 0.5,
                                              (self.dim, idx.sum())).T +
                     self.best_x[1]).T
        x[:, idx] = np.maximum(-0.5, np.minimum(0.5, x[:, idx]))

        idx = (tmp < 0.5) * (tmp > 0.4)
        x[:, idx] = (0.05 * np.random.uniform(-0.5, 0.5,
                                              (self.dim, idx.sum())).T +
                     self.best_x[0]).T
        x[:, idx] = np.maximum(-0.5, np.minimum(0.5, x[:, idx]))

        idx = (tmp > 0.5) * (tmp < 0.6)
        num = idx.sum()
        num_seed = np.minimum(self.dataset['high_y'].shape[1], 5)
        idx = np.argsort(idx)[-num:]
        idx1 = np.random.randint(0, self.dim, num)
        idx2 = np.random.randint(0, num_seed, num)
        idx3 = np.random.randint(0, num_seed, num)
        for i in range(num):
            while idx2[i] == idx3[i]:
                idx3[i] = np.random.randint(0, num_seed)
            x[:idx1[i], i] = self.dataset['high_x'][:idx1[i], -idx2[i]]
            x[idx1[i]:, i] = self.dataset['high_x'][idx1[i]:, -idx3[i]]
        return x
Example #5
def odeInt(f, x0, y0, xT):
    # Hairer, Nørsett & Wanner (1993), Solving Ordinary Differential Equations I, p. 169
    # initial step size
    f0 = f(x0, y0)
    d0 = rmsNorm(y0)
    d1 = rmsNorm(f0)
    if d0 < 1e-5 or d1 < 1e-5:
        h0 = 1e-6
    else:
        h0 = 1e-2 * d0 / d1
    y1 = y0 + f0 * h0
    f1 = f(x0 + h0, y1)
    d2 = rmsNorm(f1 - f0) / h0
    maxD = np.maximum(d1, d2)
    if maxD <= 1e-15:
        h1 = np.maximum(1e-6, h0 * 1e-3)
    else:
        h1 = np.power(1e-2 / maxD, 1 / (P + 1))
    step = np.minimum(1e2 * h0, h1)
    # integrate
    x = x0
    y = y0
    k1 = f0
    while x < xT:
        rejected = False
        accepted = False
        while not accepted:
            ks, y1, y1h = odeIntStep(step, f, x, y, k1)
            xNew = x + step
            scale = ABS_TOL + np.maximum(np.abs(y1), np.abs(y1h)) * REL_TOL
            errNorm = rmsNorm((y1 - y1h) / scale)
            if errNorm < 1:
                accepted = True
                if errNorm == 0:
                    updateFactor = MAX_UPDATE_FACTOR
                else:
                    updateFactor = np.minimum(
                        MAX_UPDATE_FACTOR,
                        SAFETY_FACTOR * np.power(errNorm, ERROR_EXP))
                if rejected:
                    updateFactor = np.minimum(1, updateFactor)
                step *= updateFactor
            else:
                rejected = True
                updateFactor = np.maximum(
                    MIN_UPDATE_FACTOR,
                    SAFETY_FACTOR * np.power(errNorm, ERROR_EXP))
                step *= updateFactor
        # interpolate
        # update
        x = xNew
        y = y1
        k1 = ks[6]
    step = xT - x
    ks, y1, y1h = odeIntStep(step, f, x, y, k1)
    return y1
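The snippet references module-level constants that the extract omits. Plausible values for an embedded Dormand-Prince-style RK45 controller (the seven ks stages and the FSAL reuse of ks[6] hint at that scheme) would be the following; these are assumptions, not taken from the source:

# Assumed constants, not part of the original snippet:
P = 4                        # order of the embedded (error) estimate
ERROR_EXP = -1.0 / (P + 1)   # standard step-size controller exponent
ABS_TOL = 1e-6
REL_TOL = 1e-3
SAFETY_FACTOR = 0.9
MIN_UPDATE_FACTOR = 0.2
MAX_UPDATE_FACTOR = 10.0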
Example #6
def test_minmax():
  grad_test(lambda x: ti.min(x, 0), lambda x: np.minimum(x, 0))
  grad_test(lambda x: ti.min(x, 1), lambda x: np.minimum(x, 1))
  grad_test(lambda x: ti.min(0, x), lambda x: np.minimum(0, x))
  grad_test(lambda x: ti.min(1, x), lambda x: np.minimum(1, x))

  grad_test(lambda x: ti.max(x, 0), lambda x: np.maximum(x, 0))
  grad_test(lambda x: ti.max(x, 1), lambda x: np.maximum(x, 1))
  grad_test(lambda x: ti.max(0, x), lambda x: np.maximum(0, x))
  grad_test(lambda x: ti.max(1, x), lambda x: np.maximum(1, x))
Example #7
 def compute_xnew(inputs, lambda_):
     x, dc, dv = unpack(inputs)
     # avoid dividing by zero outside the design region
     dv = np.where(np.ravel(args['mask']) > 0, dv, 1)
     # square root is not defined for negative numbers, which can happen due to
     # small numerical errors in the computed gradients.
     xnew = x * np.maximum(-dc / (lambda_ * dv), 0) ** eta
     lower = np.maximum(0.0, x - max_move)
     upper = np.minimum(1.0, x + max_move)
     # note: autograd does not define gradients for np.clip
     return np.minimum(np.maximum(xnew, lower), upper)
Example #8
    def invert(self, data, input=None, mask=None, tag=None):
        yhat = smooth(data,20)
        xhat = self.link(np.clip(yhat, 0.01, np.inf))
        xhat = self._invert(xhat, input=input, mask=mask, tag=tag)
        for t in range(xhat.shape[0]):
            # once the smoothed signal saturates near 1 around t, pin
            # everything from t-2 onward above 1 and clip the prefix below 1
            if np.all(xhat[np.max([0, t - 2]):t + 3] > 0.99) and t > 2:
                xhat[np.maximum(0, t - 2):] = 1.01 * np.ones(
                    np.shape(xhat[np.maximum(0, t - 2):]))
                xhat[:np.maximum(0, t - 2)] = np.clip(
                    xhat[:np.maximum(0, t - 2)], -0.5, 0.95)

        if np.abs(xhat[0]) > 1.0:
            xhat[0] = 0.5 + 0.01 * npr.randn(1, np.shape(xhat)[1])
        return xhat
Example #9
def constraint_c2(f, r):
    n_obj = f.shape[1]

    v1 = anp.inf * anp.ones(f.shape[0])

    for i in range(n_obj):
        temp = (f[:, i] - 1)**2 + (anp.sum(f**2, axis=1) - f[:, i]**2) - r**2
        v1 = anp.minimum(temp.flatten(), v1)

    a = 1 / anp.sqrt(n_obj)
    v2 = anp.sum((f - a)**2, axis=1) - r**2
    g = anp.minimum(v1, v2.flatten())

    return g
Example #10
 def IoG(self, box):
     inter_xmin = np.maximum(box[:, 0], self.IoG_gt[:, 0])
     inter_ymin = np.maximum(box[:, 1], self.IoG_gt[:, 1])
     inter_xmax = np.minimum(box[:, 2], self.IoG_gt[:, 2])
     inter_ymax = np.minimum(box[:, 3], self.IoG_gt[:, 3])
     Iw = np.clip(inter_xmax - inter_xmin, 0, 5)
     Ih = np.clip(inter_ymax - inter_ymin, 0, 5)
     I = Iw * Ih
     G = (self.IoG_gt[:, 2] - self.IoG_gt[:, 0]) * (self.IoG_gt[:, 3] -
                                                    self.IoG_gt[:, 1])
     iog = I / G
     smln = smoothln(iog)
     n_p = float(I.shape[0])
     return smln.sum() / n_p
Example #11
def true_divg(x):
    mu = x[0]
    sig = np.exp(x[1])

    for m in range(K):
        X = mu + (sig) * Z[:, m]
        P = (stats.norm.pdf(X, 2, 0.8) + stats.norm.pdf(X, 0, 0.8) +
             stats.norm.pdf(X, -2, 0.8) + stats.norm.pdf(X, -4, 0.8)) / 4
        logQ = stats.t.logpdf(X, 10, mu, sig)

        logF = np.log(P) - logQ
        t = T_value + logF
        log_accept_prob = np.minimum(0, t)
        log_Z_R = logsumexp(log_accept_prob -
                            np.log(len(t)))  # Sampling distribution is Q

        U = np.random.uniform(0, 1, (len(X)))

        Samples = X[np.log(U) < log_accept_prob]  # Sampling distribution is R

        P = (stats.norm.pdf(Samples, 2, 0.8) +
             stats.norm.pdf(Samples, 0, 0.8) + stats.norm.pdf(
                 Samples, -2, 0.8) + stats.norm.pdf(Samples, -4, 0.8)) / 4
        Q = stats.t.pdf(Samples, 10, mu, sig)
        logF = np.log(P) - np.log(Q)
        t = T_value + logF
        log_accept_prob = np.minimum(0, t)

    gamma_p = np.log(P)
    gamma_r = np.log(Q) + log_accept_prob - log_Z_R

    if alpha < 1.0 - 10e-5:
        ratio = (1 - alpha) * (gamma_p - gamma_r)

        Max_ratio = max(ratio)

        true_divg = (1 / (alpha - 1)) * (
            (logsumexp(ratio - Max_ratio - np.log(len(ratio)))) + Max_ratio)

    elif alpha < 1.0 + 10e-5:
        true_divg = np.mean((gamma_r - gamma_p))

    else:
        ratio = (1 - alpha) * (gamma_p - gamma_r)

        true_divg = (1 /
                     (alpha - 1)) * ((logsumexp(ratio - np.log(len(ratio)))))

    return (true_divg)
Example #12
 def loss(x):
     nonlocal best_x
     nonlocal best_loss
     x = x.reshape(model.dim, int(x.size / model.dim))
     EI = np.zeros((x.shape[1]))
     if model.best_constr <= 0:
         py, ps2 = model.models[0].predict(x)
         ps = np.sqrt(ps2) + 0.000001
         tmp = -(py - model.best_y[0]) / ps
         # tmp > -40
         # tmp1 = np.maximum(-40, tmp)
         EI1 = ps * (tmp * cdf(tmp) + pdf(tmp))
         EI1 = np.log(np.maximum(0.000001, EI1))
         # tmp <= -40
         tmp2 = np.minimum(-40, tmp)**2
         EI2 = np.log(ps) - tmp2 / 2 - np.log(tmp2 - 1)
         # EI
         EI = EI1 * (tmp > -40) + EI2 * (tmp <= -40)
     PI = np.zeros((x.shape[1]))
     for i in range(1, model.outdim):
         py, ps2 = model.models[i].predict(x)
         ps = np.sqrt(ps2) + 0.000001
         PI = PI + logphi_vector(-py / ps)
     tmp_loss = -EI - PI
     tmp_loss = tmp_loss.sum()
     if tmp_loss < best_loss:
         best_loss = tmp_loss
         best_x = np.copy(x)
     return tmp_loss
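Standalone sketch of the log-EI tail split used above (illustrative; assumes the snippet's cdf/pdf helpers wrap the standard normal, here taken from scipy): for strongly negative tmp the direct expression underflows, while the asymptotic branch stays finite.

import numpy as np
from scipy.stats import norm

ps, tmp = 1.0, -41.0
exact = np.log(ps * (tmp * norm.cdf(tmp) + norm.pdf(tmp)))   # -inf: underflow
approx = np.log(ps) - tmp**2 / 2 - np.log(tmp**2 - 1)        # finite, ~-847.9
print(exact, approx)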
Example #13
    def loss_func(param_vec=None, step_id=None, **unused_kwargs):
        """ Compute loss at provided flat parameter vec

        Returns
        -------
        loss_val : float
        """
        if step_id is None or step_id < 0:
            cur_dataset = dataset
            frac_train_laps_completed = 1.0
        else:
            cur_slice = make_slice_for_step(step_id=step_id,
                                            seed=data_seed,
                                            n_total=dataset['n_docs'],
                                            n_batches=n_batches)
            cur_dataset = slda_utils__dataset_manager.slice_dataset(
                dataset=dataset, cur_slice=cur_slice)
            frac_train_laps_completed = np.minimum(
                1.0,
                float(step_id) / float(max_train_laps * n_batches))

        return calc_loss__slda(
            param_vec=param_vec,
            dataset=cur_dataset,
            frac_train_laps_completed=frac_train_laps_completed,
            pi_max_iters_first_train_lap=pi_max_iters_first_train_lap,
            dim_P=dim_P,
            **model_hyper_P)
Example #14
 def decrease(self, mu):
     self.factor = np.minimum(1 / self.min_factor,
                              self.factor / self.min_factor)
     if self.mu * self.factor > self.min_mu:
         self.mu = self.mu * self.factor
     else:
         self.mu = 0
Example #15
    def __init__(self, n, penalty='huber', alpha=1.0):
        assert (alpha > 0.0)
        self.alpha = alpha
        self.alpha_sq = alpha**2
        self.penalty = penalty.lower()
        if (self.penalty == 'quadratic'):
            self.phi = lambda z: 0.5 * np.power(z, 2.0)
        elif (self.penalty == 'pseudo-huber'):
            self.phi = lambda z: self.alpha_sq * (
                np.sqrt(1.0 + np.power(z, 2.0) / self.alpha_sq) - 1.0)
        elif (self.penalty == 'huber'):
            self.phi = lambda z: np.where(
                np.abs(z) <= alpha, 0.5 * np.power(z, 2.0),
                alpha * np.abs(z) - 0.5 * self.alpha_sq)
        elif (self.penalty == 'welsch'):
            self.phi = lambda z: 1.0 - np.exp(
                -0.5 * np.power(z, 2.0) / self.alpha_sq)
        elif (self.penalty == 'trunc-quad'):
            self.phi = lambda z: np.minimum(
                0.5 * np.power(z, 2.0), 0.5 * self.alpha_sq)
        else:
            assert False, "unrecognized penalty function {}".format(penalty)

        super().__init__(n, 1)  # make sure node is properly constructed
        self.eps = 1.0e-4  # relax tolerance on optimality val
Example #16
def inv_logistic_sigmoid(p, do_force_safe=True):
    ''' Compute inverse logistic sigmoid from unit interval to reals.

    Numerically stable and fully vectorized.

    Args
    ----
    p : array-like, with values in (0, 1)

    Returns
    -------
    x : array-like, size of p, with values in (-infty, infty)

    Examples
    --------
    >>> np.round(inv_logistic_sigmoid(0.11), 6)
    -2.090741
    >>> np.round(inv_logistic_sigmoid(0.5), 6)
    0.0
    >>> np.round(inv_logistic_sigmoid(0.89), 6)
    2.090741

    >>> p_vec = np.asarray([
    ...     1e-100, 1e-10, 1e-5,
    ...     0.25, 0.75, .9999, 1-1e-14])
    >>> np.round(inv_logistic_sigmoid(p_vec), 2)
    array([-230.26,  -23.03,  -11.51,   -1.1 ,    1.1 ,    9.21,   32.24])
    '''
    if do_force_safe:
        p = np.minimum(np.maximum(p, MIN_VAL), MAX_VAL)
    return np.log(p) - np.log1p(-p)
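MIN_VAL and MAX_VAL are module constants the extract omits. A definition consistent with the doctest (1e-100 and 1 - 1e-14 both pass through unclipped) would be, as an assumption:

# Assumed safe bounds, not shown in the original snippet:
MIN_VAL = 1e-100
MAX_VAL = 1.0 - 1e-14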
Example #17
def to_safe_common_arr(topics_KV, min_eps=MIN_EPS):
    ''' Force provided topics_KV array to be numerically safe.

    Returns
    -------
    topics_KV : 2D array, size K x V
        minimum value of each row is min_eps
        each row will sum to 1.0 (+/- min_eps)
    '''
    K, V = topics_KV.shape
    topics_KV = topics_KV.copy()
    for rep in range(2):
        np.minimum(topics_KV, 1 - min_eps, out=topics_KV)
        topics_KV /= topics_KV.sum(axis=1)[:, np.newaxis]
        np.maximum(topics_KV, min_eps, out=topics_KV)
    return topics_KV
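Quick usage sketch (editorial, with hypothetical values; min_eps is passed explicitly so no module constant is needed):

import numpy as np

topics = np.random.rand(3, 5)
topics[0, 0] = 0.0                                   # an unsafe zero entry
topics /= topics.sum(axis=1, keepdims=True)
safe = to_safe_common_arr(topics, min_eps=1e-8)
assert safe.min() >= 1e-8
assert np.allclose(safe.sum(axis=1), 1.0, atol=1e-6)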
Example #18
def generate_data(n=1000):
    from scipy.optimize import root_scalar

    p = 0.8
    gamma2 = 1.6
    lambda2 = 0.1
    gamma1 = 3
    lambda1 = 0.1
    beta = 0.5
    X = np.random.binomial(1, 0.5, size=n)

    # confirm that S(5).mean() == 0.106 ✅
    # h(t) looks like graph ✅
    def S(t, x):
        return p * np.exp(
            -lambda1 *
            (t**gamma1) * np.exp(-x * beta * gamma1)) + (1 - p) * np.exp(
                -lambda2 * (t**gamma2) * np.exp(-x * beta * gamma2))

    T_actual = np.empty(n)

    for i in range(n):
        u = np.random.random()
        x = X[i]
        sol = root_scalar(lambda t: S(t, x) - u, x0=1, x1=3)
        assert sol.converged
        T_actual[i] = sol.root

    MAX_TIME = 5
    T_observed = np.minimum(MAX_TIME, T_actual)
    E = T_actual < MAX_TIME
    return pd.DataFrame({"E": E, "T": T_observed, "X": X})
Example #19
def lm_cost(x):
    n = (x.shape[0] - 1) // 4
    tk, xk, yk, thetak = split_state(x)
    xk = np.concatenate(([start_x], xk, [goal_x]))
    yk = np.concatenate(([start_y], yk, [goal_y]))
    thetak = np.concatenate(([start_theta], thetak, [goal_theta]))

    time_cost = np.sum(tk)

    # constant curvature path constraint
    path_cost = np.fabs(get_ceq(x))
    # vel, acc, radius limits
    ineq_cost = np.fabs(np.minimum(0, get_c(x)))

    # discourage sharp turns (a bit hacky but wanted paths to be less jagged)
    dist = (xk[1:] - xk[:-1])**2 + (yk[1:] - yk[:-1])**2

    # teb paper said that path cost should be much larger than the rest
    # but these weights/entire cost function could probably use some tuning
    # cost = .1*time_cost + np.sum(5*path_cost) + 5*np.sum(ineq_cost)  + .1*np.sum(dist)
    cost = np.concatenate(
        ([0.01 * time_cost], 10 * path_cost, 10 * ineq_cost, 0.1 * dist))

    # cost = 0.001*time_cost + np.sum( 5000*path_cost) + 10*np.sum(ineq_cost)  + 100*np.sum(dist)

    return cost
Example #20
 def IoU(self, box):
     inter_xmin = np.maximum(box[:, 0], self.IoU_const[:, 0])
     inter_ymin = np.maximum(box[:, 1], self.IoU_const[:, 1])
     inter_xmax = np.minimum(box[:, 2], self.IoU_const[:, 2])
     inter_ymax = np.minimum(box[:, 3], self.IoU_const[:, 3])
     Iw = np.clip(inter_xmax - inter_xmin, 0, 5)
     Ih = np.clip(inter_ymax - inter_ymin, 0, 5)
     I = Iw * Ih
     A1 = (box[:, 2] - box[:, 0]) * (box[:, 3] - box[:, 1])
     A2 = (self.IoU_const[:, 2] - self.IoU_const[:, 0]) * (
         self.IoU_const[:, 3] - self.IoU_const[:, 1])
     iou = I / (A1 + A2 - I)
     smln = smoothln(iou)
     n_p = float(I.shape[0])
     return smln.sum()
Example #21
    def predict_cumulative_hazard(self, df, times=None):
        """
        Return the cumulative hazard rate of subjects in X at time points.

        Parameters
        ----------
        df: numpy array or DataFrame
            a (n,d) covariate numpy array or DataFrame. If a DataFrame, columns
            can be in any order. If a numpy array, columns must be in the
            same order as the training data.
        times: iterable, optional
            an iterable of increasing times to predict the cumulative hazard at. Default
            is the set of all durations (observed and unobserved). Uses a linear interpolation if
            points in time are not in the index.

        Returns
        -------
        cumulative_hazard_ : DataFrame
            the cumulative hazard of individuals over the timeline
        """
        times = np.asarray(
            coalesce(times, self.timeline, np.unique(self.durations)))
        n = times.shape[0]
        times = times.reshape((n, 1))

        lambdas_ = self._prep_inputs_for_prediction_and_return_parameters(df)

        bp = self.breakpoints
        M = np.minimum(np.tile(bp, (n, 1)), times)
        M = np.hstack([M[:, tuple([0])], np.diff(M, axis=1)])

        return pd.DataFrame(np.dot(M, (1 / lambdas_)),
                            columns=_get_index(df),
                            index=times[:, 0])
Example #22
 def _cumulative_hazard(self, params, times):
     n = times.shape[0]
     times = times.reshape((n, 1))
     bp = self.breakpoints
     M = np.minimum(np.tile(bp, (n, 1)), times)
     M = np.hstack([M[:, tuple([0])], np.diff(M, axis=1)])
     return np.dot(M, 1 / params)
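The M construction shared by these breakpoint models computes, per subject, the time spent in each inter-breakpoint interval. A standalone demonstration with hypothetical numbers:

import numpy as np

bp = np.array([10.0, 20.0, np.inf])          # breakpoints; last is open-ended
times = np.array([5.0, 15.0, 30.0]).reshape(-1, 1)
M = np.minimum(np.tile(bp, (3, 1)), times)
M = np.hstack([M[:, [0]], np.diff(M, axis=1)])
# time spent in [0, 10), [10, 20), [20, inf):
# [[ 5.  0.  0.]
#  [10.  5.  0.]
#  [10. 10. 10.]]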
Example #23
 def _cumulative_hazard(self, params, T, Xs):
     n = T.shape[0]
     T = T.reshape((n, 1))
     M = np.minimum(np.tile(self.breakpoints, (n, 1)), T)
     M = np.hstack([M[:, tuple([0])], np.diff(M, axis=1)])
     lambdas_ = np.array([
         safe_exp(-np.dot(Xs[param], params[param]))
         for param in self._fitted_parameter_names
     ])
     return (M * lambdas_.T).sum(1)
Example #24
 def integrate(self, t, *args, **kwargs):
     """
     This integral is a simple linear interpolant,
     which I would like to do as a spline.
     However, I need to do it manually, since
     it needs to be autograd differentiable, which splines are not.
      The method here is not especially efficient.
     """
     tau = self.get_param('tau', **kwargs)
     kappa = self.get_param('kappa', **kwargs)
     mu = self.get_param('mu', 0.0, **kwargs)
     f_kappa = self.f_kappa(kappa=kappa, mu=mu)
     t = np.reshape(t, (-1, 1))
     delta = np.diff(tau)
     each = np.maximum(
         0, (t - tau[:-1].reshape(1, -1))
     )
     each = np.minimum(
         each,
         delta.reshape(1, -1)
     )
     return np.sum(
         each * np.reshape(f_kappa, (1, -1)),
         1
     ) + (mu * t.ravel())
Example #25
 def loss(x):
     nonlocal tmp_loss
     x = x.reshape(model.dim, -1)
     EI = np.zeros((x.shape[1]))
     if model.best_constr[1] <= 0:
         py, ps2 = model.models[0].predict_low(x)
         ps = np.sqrt(ps2) + 0.000001
         tmp = -(py - model.best_y[0, 0]) / ps
         # tmp > -6
         # tmp1 = np.maximum(-6, tmp)
         EI1 = ps * (tmp * cdf(tmp) + pdf(tmp))
         EI1 = np.log(np.maximum(0.000001, EI1))
         # tmp <= -6
         tmp2 = np.minimum(-6, tmp)**2
         EI2 = np.log(ps) - tmp2 / 2 - np.log(tmp2 - 1)
         # EI
         EI = EI1 * (tmp > -6) + EI2 * (tmp <= -6)
     PI = np.zeros((x.shape[1]))
     for i in range(1, model.outdim):
         py, ps2 = model.models[i].predict_low(x)
         ps = np.sqrt(ps2) + 0.000001
         PI = PI + logphi_vector(-py / ps)
         '''
         tmp = -py/ps
         # tmp > -6
         PI1 = np.log(cdf(tmp))
         # tmp <= -6
         tmp2 = np.minimum(-6, tmp)
         PI2 = -0.5*tmp2**2 - np.log(-tmp2) - 0.5*np.log(2*np.pi)
         PI = PI + PI1*(tmp > -6) + PI2*(tmp <= -6)
         '''
     tmp_loss = -EI - PI
     tmp_loss = tmp_loss.sum()
     return tmp_loss
Example #26
def f(theta):
    objReg = 0.5 / 2.0 * np.dot(theta[1:], theta[1:])
    sigmoids = 1.0 / (1.0 + np.exp(
        np.minimum(300.0, -(theta[0] + np.matmul(trainData, theta[1:])))))
    innerSecondTerm = 1.0 - trainLabels + np.multiply(
        sigmoids, (2.0 * trainLabels - 1.0))
    result = np.sum(np.log(innerSecondTerm + 1e-10))
    return objReg - result
Example #27
 def plot_gmm(params, ax, num_points=100):
     angles = np.expand_dims(np.linspace(0, 2 * np.pi, num_points), 1)
     xs, ys = np.cos(angles), np.sin(angles)
     circle_pts = np.concatenate([xs, ys], axis=1) * 2.0
     for log_proportion, mean, chol in zip(*unpack_params(params)):
         cur_pts = mean + np.dot(circle_pts, chol)
         alpha = np.minimum(1.0, np.exp(log_proportion) * num_guassians)
         ax.plot(cur_pts[:, 0], cur_pts[:, 1], "-", alpha=alpha)
Example #29
def ye_limit(x, trackwidth):
    k = TrackCurvature(x)
    N = len(k)
    lowlimit = -trackwidth / 2 * np.ones(N)
    highlimit = trackwidth / 2 * np.ones(N)
    # use a 5% margin so we can't actually hit the center of curvature
    lowlimit[k < 0] = np.maximum(0.95 / k[k < 0], -trackwidth / 2)
    highlimit[k > 0] = np.minimum(0.95 / k[k > 0], trackwidth / 2)
    return lowlimit, highlimit
Example #30
def distance_to_segment(A, B, X):
    # Shapes of A, B, X are (D,), (D,), (..., D) respectively
    # Computes squared distance of points X from line segment AB
    AB_length = np.linalg.norm(B - A)
    AB_hat = (B - A) / AB_length  # Unit vector from A to B
    s = np.dot(X - A, AB_hat)     # Distance along segment AB of closest point
    s_bounded = np.minimum(AB_length, np.maximum(0., s))
    closest_point_on_segment = A + s_bounded[..., None] * AB_hat
    return np.linalg.norm(X - closest_point_on_segment, axis=-1)
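Quick check with hypothetical points: a point beyond B is clamped to B, so both test points below sit at distance 1 from the segment.

import numpy as np

A = np.array([0.0, 0.0])
B = np.array([1.0, 0.0])
X = np.array([[0.5, 1.0],    # above the midpoint
              [2.0, 0.0]])   # beyond B; clamps to B
print(distance_to_segment(A, B, X))   # -> [1. 1.]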
Example #32
def generate_data(n=20000):
    X = np.random.binomial(1, 0.5, size=n)
    Z = np.random.normal(0, 4, size=n)
    T_actual = np.random.exponential(1 / np.exp(-5 + 1 * X + 1 * Z))
    C = 10 * np.random.random(size=n)

    T_observed = np.minimum(T_actual, C)
    E = T_actual < C
    return pd.DataFrame({"X": X, "E": E, "T": T_observed, "Z": Z})
Example #33
 def plot_components(ax, params):
     pgm_params, loglike_params, recogn_params = params
     dirichlet_natparams, niw_natparams = pgm_params
     normalize = lambda arr: np.minimum(1., arr / np.sum(arr) * num_clusters)
     weights = normalize(np.exp(dirichlet.expectedstats(dirichlet_natparams)))
     components = map(get_component, niw.expectedstats(niw_natparams))
     lines = repeat(None) if isinstance(ax, plt.Axes) else ax
     for weight, (mu, Sigma), line in zip(weights, components, lines):
         plot_ellipse(ax, weight, mu, Sigma, line)
Example #34
def minConf_SPG(funObj, x, funProj, options=None):
    """ This function implements Mark Schmidt's MATLAB implementation of
    spectral projected gradient (SPG) to solve for projected quasi-Newton
    direction
                min funObj(x) s.t. x in C
    Parameters
    ----------
    funObj: function that returns objective function value and the gradient
    x: initial parameter value
    funProj: function that returns projection of x onto C
    options:
        verbose: level of verbosity (0: no output, 1: final, 2: iter (default), 3:
            debug)
        optTol: tolerance used to check for optimality (default: 1e-5)
        progTol: tolerance used to check for lack of progress (default: 1e-9)
        maxIter: maximum number of calls to funObj (default: 500)
        numDiff: compute derivatives numerically (0: use user-supplied
            derivatives (default), 1: use finite differences, 2: use complex
            differentials)
        suffDec: sufficient decrease parameter in Armijo condition (default
            : 1e-4)
        interp: type of interpolation (0: step-size halving, 1: quadratic,
            2: cubic)
        memory: number of steps to look back in non-monotone Armijo
            condition
        useSpectral: use spectral scaling of gradient direction (default:
            1)
        curvilinear: backtrack along projection Arc (default: 0)
        testOpt: test optimality condition (default: 1)
        feasibleInit: if 1, then the initial point is assumed to be
            feasible
        bbType: type of Barzilai Borwein step (default: 1)
 
    Notes: 
        - if the projection is expensive to compute, you can reduce the
            number of projections by setting testOpt to 0
    """
    
    nVars = x.shape[0]
    options_default = {'verbose':2, 'numDiff':0, 'optTol':1e-5, 'progTol':1e-9,\
                'maxIter':500, 'suffDec':1e-4, 'interp':2, 'memory':10,\
                'useSpectral':1,'curvilinear':0,'feasibleInit':0,'testOpt':1,\
                'bbType':1}
    options = setDefaultOptions(options, options_default)

    if options['verbose'] >= 2:
        if options['testOpt'] == 1:
            print('{:10s}'.format('Iteration') +
                  '{:10s}'.format('FunEvals') +
                  '{:10s}'.format('Projections') +
                  '{:15s}'.format('StepLength') +
                  '{:15s}'.format('FunctionVal') +
                  '{:15s}'.format('OptCond'))
        else:
            print('{:10s}'.format('Iteration') +
                  '{:10s}'.format('FunEvals') +
                  '{:10s}'.format('Projections') +
                  '{:15s}'.format('StepLength') +
                  '{:15s}'.format('FunctionVal'))
    
    funEvalMultiplier = 1

    # evaluate initial point
    if options['feasibleInit'] == 0:
        x = funProj(x)
    [f, g] = funObj(x)
    projects = 1
    funEvals = 1

    # optionally check optimality
    if options['testOpt'] == 1:
        projects = projects + 1
        if np.max(np.abs(funProj(x-g)-x)) < options['optTol']:
            if options['verbose'] >= 1:
                print "First-order optimality conditions below optTol at initial point"
            return (x, f, funEvals, projects)
    
    i = 1
    while funEvals <= options['maxIter']:
        # compute step direction
        if i == 1 or options['useSpectral'] == 0:
            alpha = 1.
        else:
            y = g - g_old
            s = x - x_old
            if options['bbType'] == 1:
                alpha = np.dot(s,s)/np.dot(s,y)
            else:
                alpha = np.dot(s,y)/np.dot(y,y)
            if alpha <= 1e-10 or alpha >= 1e10:
                alpha = 1.
        
        d = -alpha * g
        f_old = f
        x_old = x
        g_old = g

        # compute projected step
        if options['curvilinear'] == 0:
            d = funProj(x+d) - x
            projects = projects + 1

        # check that progress can be made along the direction
        gtd = np.dot(g, d)
        if gtd > -options['progTol']:
            if options['verbose'] >= 1:
                print "Directional derivtive below progTol"
            break

        # select initial guess to step length
        if i == 1:
            t = np.minimum(1., 1./np.sum(np.abs(g)))
        else:
            t = 1.

        # compute reference function for non-monotone condition
        if options['memory'] == 1:
            funRef = f
        else:
            if i == 1:
                old_fvals = np.ones(options['memory']) * (-1) * np.inf
            
            if i <= options['memory']:
                old_fvals[i-1] = f
            else:
                old_fvals = np.append(old_fvals[1:], f)
            funRef = np.max(old_fvals)
        
        # evaluate the objective and gradient at the initial step
        if options['curvilinear'] == 1:
            x_new = funProj(x + t*d)
            projects = projects + 1
        else:
            x_new = x + t*d
        [f_new, g_new] = funObj(x_new)
        funEvals = funEvals + 1

        # Backtracking line search
        lineSearchIters = 1
        while f_new > funRef + options['suffDec']*np.dot(g,x_new-x) or \
                isLegal(f_new) == False:
            temp = t
            if options['interp'] == 0 or isLegal(f_new) == False:
                if options['verbose'] == 3:
                    print('Halving step size')
                t = t/2.
            elif options['interp'] == 2 and isLegal(g_new):
                if options['verbose'] == 3:
                    print("Cubic Backtracking")
                t = polyinterp(np.array([[0, f, gtd],
                                         [t, f_new, np.dot(g_new, d)]]))[0]
            elif lineSearchIters < 2 or isLegal(f_prev):
                if options['verbose'] == 3:
                    print("Quadratic Backtracking")
                t = polyinterp(np.array([[0, f, gtd],
                                         [t, f_new, complex(0, 1)]]))[0]
            else:
                if options['verbose'] == 3:
                    print("Cubic Backtracking on Function Values")
                t = polyinterp(np.array([[0., f, gtd],
                                         [t, f_new, complex(0, 1)],
                                         [t_prev, f_prev, complex(0, 1)]]))[0]
            # adjust if change is too small
            if t < temp*1e-3:
                if options['verbose'] == 3:
                    print("Interpolated value too small, Adjusting")
                t = temp * 1e-3
            elif t > temp * 0.6:
                if options['verbose'] == 3:
                    print("Interpolated value too large, Adjusting")
                t = temp * 0.6

            # check whether step has become too small
            if np.max(np.abs(t*d)) < options['progTol'] or t == 0:
                if options['verbose'] == 3:
                    print "Line Search failed"
                t = 0.
                f_new = f
                g_new = g
                break
            
            # evaluate new point
            f_prev = f_new
            t_prev = temp
            if options['curvilinear'] == True:
                x_new = funProj(x + t*d)
                projects = projects + 1
            else:
                x_new = x + t*d
            [f_new, g_new] = funObj(x_new)
            funEvals = funEvals + 1
            lineSearchIters = lineSearchIters + 1
        
        # done with line search

        # take step
        x = x_new
        f = f_new
        g = g_new

        if options['testOpt'] == True:
            optCond = np.max(np.abs(funProj(x-g)-x))
            projects = projects + 1

        # output log
        if options['verbose'] >= 2:
            if options['testOpt'] == True:
                print('{:10d}'.format(i) +
                      '{:10d}'.format(funEvals*funEvalMultiplier) +
                      '{:10d}'.format(projects) +
                      '{:15.5e}'.format(t) +
                      '{:15.5e}'.format(f) +
                      '{:15.5e}'.format(optCond))
            else:
                print('{:10d}'.format(i) +
                      '{:10d}'.format(funEvals*funEvalMultiplier) +
                      '{:10d}'.format(projects) +
                      '{:15.5e}'.format(t) +
                      '{:15.5e}'.format(f))
        # check optimality
        if options['testOpt'] == True:
            if optCond < options['optTol']:
                if options['verbose'] >= 1:
                    print "First-order optimality conditions below optTol"
                break

        if np.max(np.abs(t*d)) < options['progTol']:
            if options['verbose'] >= 1:
                print "Step size below progTol"
            break
        
        if np.abs(f-f_old) < options['progTol']:
            if options['verbose'] >= 1:
                print "Function value changing by less than progTol"
            break

        if funEvals*funEvalMultiplier > options['maxIter']:
            if options['verbose'] >= 1:
                print "Function evaluation exceeds maxIter"
            break

        i = i + 1

    return (x, f, funEvals, projects)
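A hypothetical call (assumes the helpers referenced above, such as setDefaultOptions, isLegal and polyinterp, are importable alongside minConf_SPG): minimize a quadratic over the unit box with np.clip as the projection.

import numpy as np

c = np.array([1.5, -0.3, 0.7])

def funObj(x):
    # objective value and gradient of ||x - c||^2
    return np.sum((x - c) ** 2), 2.0 * (x - c)

def funProj(x):
    return np.clip(x, 0.0, 1.0)

x, f, funEvals, projects = minConf_SPG(funObj, np.zeros(3), funProj,
                                       {'verbose': 0})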
Example #35
def plot_gaussian_mixture(params, ax):
    for log_proportion, mean, cov_sqrt in zip(*unpack_gmm_params(params)):
        alpha = np.minimum(1.0, np.exp(log_proportion) * 10)
        plot_ellipse(ax, mean, cov_sqrt, alpha)