def mle_batch(self, data, batch, k):
        """
        Calculates LID values of data w.r.t. batch
        Args:
            data: samples to calculate LIDs of
            batch: samples to calculate LIDs against
            k: the number of nearest neighbors to consider

        Returns: the calculated LID values

        """
        # cap k at the number of available neighbours
        k = min(k, len(data) - 1)
        # MLE (Levina-Bickel) estimator of local intrinsic dimensionality
        f = lambda v: -k / np.sum(np.log(v / v[-1]))

        gamma = self.classifier.kernel.gamma
        if gamma is None:
            gamma = 1.0 / self.training_data_ndarray.shape[1]

        if batch is None:
            # K = cdist(data, data)
            K = rbf_kernel(data, Y=data, gamma=gamma)
            # invert the RBF similarities so that larger values mean more distant samples
            K = np.reciprocal(K)
            # get the closest k neighbours
            a = np.apply_along_axis(np.sort, axis=1, arr=K)[:, 1:k + 1]
        else:
            batch = np.asarray(batch, dtype=np.float32)
            # K = cdist(data, batch)
            K = rbf_kernel(data, Y=batch, gamma=gamma)
            K = np.reciprocal(K)
            # get the closest k neighbours
            a = np.apply_along_axis(np.sort, axis=1, arr=K)[:, 0:k]

        a = np.apply_along_axis(f, axis=1, arr=a)
        return np.nan_to_num(a)
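For reference, the estimator inside f above is the Levina-Bickel MLE of local intrinsic dimensionality. A minimal, self-contained sketch of the same estimator applied to plain Euclidean distances (the commented-out cdist path) might look like the following; the function name lid_mle is illustrative only, not part of the source.

import numpy as np
from scipy.spatial.distance import cdist

def lid_mle(data, batch, k=20):
    # Levina-Bickel MLE of local intrinsic dimensionality on Euclidean distances.
    k = min(k, len(batch) - 1)
    dists = np.sort(cdist(data, batch), axis=1)[:, 1:k + 1]  # skip column 0 (self-distance when data == batch)
    f = lambda v: -k / np.sum(np.log(v / v[-1]))
    return np.nan_to_num(np.apply_along_axis(f, axis=1, arr=dists))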
Example #2
    def mle_batch_euclidean(self, data, k):
        """
        Calculates LID values of data w.r.t. the stored training data
        Args:
            data: samples to calculate LIDs of
            k: the number of nearest neighbors to consider

        Returns: the calculated LID values

        """
        batch = self.training_data_ndarray
        f = lambda v: -k / np.sum(np.log((v / v[-1]) + 1e-9))
        gamma = self.classifier.kernel.gamma
        if gamma is None:
            gamma = 1.0 / self.training_data_ndarray.shape[1]
        K = rbf_kernel(data, Y=batch, gamma=gamma)
        K = np.reciprocal(K)
        # K = cdist(data, batch)
        # get the closest k neighbours
        if self.xc is not None and self.xc.shape[0] == 1:
            # only one attack sample
            sorted_distances = np.sort(K)[0, 1:1 + k]
        else:
            sorted_distances = np.sort(K)[0, 0:k]
        a = np.apply_along_axis(f, axis=0, arr=sorted_distances)
        return np.nan_to_num(a)
Example #3
    def objective_function_gradient(self, xc, normalization=True):
        """
        Compute the loss derivative wrt the attack sample xc

        The derivative is decomposed as:

        dl / dx = sum_{c=1}^{n_classes} ( dl / df_c * df_c / dx )
        """

        xc = xc.atleast_2d()
        n_samples = xc.shape[0]

        if n_samples > 1:
            raise TypeError("x is not a single sample!")

        # index of poisoning point within xc.
        # This will be replaced by the input parameter xc
        if self._idx is None:
            idx = 0
        else:
            idx = self._idx

        self._xc[idx, :] = xc
        clf, tr = self._update_poisoned_clf()

        y_ts = self._y_target if self._y_target is not None else self.val.Y

        # computing gradient of loss(y, f(x)) w.r.t. f
        _, score = clf.predict(self.val.X, return_decision_function=True)

        grad = CArray.zeros((xc.size, ))

        if clf.n_classes <= 2:
            loss_grad = self._attacker_loss.dloss(y_ts,
                                                  CArray(score[:, 1]).ravel())
            grad = self._gradient_fk_xc(self._xc[idx, :], self._yc[idx], clf,
                                        loss_grad, tr)
        else:
            # compute the gradient as a sum of the gradient for each class
            for c in range(clf.n_classes):
                loss_grad = self._attacker_loss.dloss(y_ts, score, c=c)

                grad += self._gradient_fk_xc(self._xc[idx, :], self._yc[idx],
                                             clf, loss_grad, tr, c)

        # LID regularization: subtract the gradient of the LID-based loss w.r.t. the poisoning point xc
        a = cdist(xc.tondarray(), self.training_data_ndarray)
        sort_indices = np.apply_along_axis(np.argsort, axis=1,
                                           arr=a)[:, 0:self.lid_k]
        neighbors = self.training_data_ndarray[sort_indices, :].squeeze()
        # Create a function to compute the gradient
        grad_lid_cost = autograd_gradient(self.lid_cost, 0)
        lid_gradient = grad_lid_cost(xc.tondarray(), neighbors, self.lid_k)
        grad = grad - CArray(lid_gradient.reshape(grad.shape))
        if normalization:
            norm = grad.norm()
            return grad / norm if norm > 0 else grad
        else:
            return grad
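The gradient step above assumes a differentiable self.lid_cost(xc, neighbors, k) written against autograd.numpy, so that autograd_gradient (autograd's grad) can differentiate it with respect to its first argument. That function is not shown in the snippet; one plausible shape for it, purely as an illustration, is:

import autograd.numpy as anp
from autograd import grad as autograd_gradient

def lid_cost(xc, neighbors, k):
    # xc: (1, d) poisoning point, neighbors: (k, d) nearest training samples.
    dists = anp.sqrt(anp.sum((neighbors - xc) ** 2, axis=1) + 1e-12)
    r_max = anp.max(dists)
    # MLE LID estimate of xc; the epsilon keeps the log finite at the farthest neighbour.
    return -k / anp.sum(anp.log(dists / r_max + 1e-9))

grad_lid_cost = autograd_gradient(lid_cost, 0)  # d lid_cost / d xc, shape (1, d)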
Example #4
    def simulate(self, u, t_span, x0, t_eval=None):
        """simulate calls solve_ivp and returns a Trajectory object.
		u in the returned trajectory is the underlying noiseless trajectory.
		TODO: find a way to return the noisy trajectory? (get the u_noisy out of f)."""
        sol = solve_ivp(lambda t, x: self.f(x, u(t)),
                        t_span,
                        x0,
                        t_eval=t_eval)
        y = np.apply_along_axis(lambda x: self.g(x), 0, sol.y)
        return Trajectory(t=sol.t,
                          x=sol.y,
                          y=y,
                          u=np.array([u(t) for t in sol.t]).T)
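Below is a self-contained illustration of the same pattern: solve_ivp integrating dx/dt = f(x, u(t)), the output map applied column-wise, and the input re-sampled along the returned time grid. The toy system and signals here are placeholders, not the one from the source.

import numpy as np
from scipy.integrate import solve_ivp

f = lambda x, u: np.array([-x[0] + u])          # toy first-order system
g = lambda x: 2.0 * x                            # toy output map
u = lambda t: np.sin(t)                          # control signal

sol = solve_ivp(lambda t, x: f(x, u(t)), (0.0, 10.0), [0.0],
                t_eval=np.linspace(0.0, 10.0, 200))
y = np.apply_along_axis(g, 0, sol.y)             # outputs, one column per time step
u_applied = np.array([u(t) for t in sol.t]).T    # inputs along the trajectory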
Example #5
    def emissionProb( self, t, forward=False, xs=None ):
        if( xs is None ):
            emiss = self.L[ t - 1 ]
        else:

            emiss = np.zeros( self.K )

            for i, ( n1, n2, n3 ) in enumerate( zip( self.n1Trans, self.n2Trans, self.n3Trans ) ):

                def ll( _x ):
                    x, x1 = np.split( _x, 2 )
                    return Regression.log_likelihood( ( x, x1 ), nat_params=( n1, n2, n3 ) )

                emiss[ i ] = np.apply_along_axis( ll, -1, np.hstack( ( xs[ :-1, t ], xs[ 1: , t ] ) ) )

        return emiss if forward else np.broadcast_to( emiss, ( self.K, self.K ) )
Example #6
    def preprocessData( self, xs, u=None, computeMarginal=True ):
        xs = np.array( xs )

        # Not going to use multiple measurements here
        assert xs.ndim == 2

        self._T = xs.shape[ 0 ]

        # Compute P( x_t | x_t-1, z ) for all of the observations over each z

        self.L0 = Normal.log_likelihood( xs[ 0 ], nat_params=( self.n1_0, self.n2_0 ) )

        self.L = np.empty( ( self.T - 1, self.K ) )

        for i, ( n1, n2, n3 ) in enumerate( zip( self.n1Trans, self.n2Trans, self.n3Trans ) ):

            def ll( _x ):
                x, x1 = np.split( _x, 2 )
                return Regression.log_likelihood( ( x, x1 ), nat_params=( n1, n2, n3 ) )

            self.L[ :, i ] = np.apply_along_axis( ll, -1, np.hstack( ( xs[ :-1 ], xs[ 1: ] ) ) )
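The np.hstack((xs[:-1], xs[1:])) trick above pairs each state with its successor so that ll receives one concatenated vector per transition. A tiny standalone illustration of that pairing:

import numpy as np

xs = np.arange(8.0).reshape(4, 2)        # 4 time steps of a 2-D state
pairs = np.hstack((xs[:-1], xs[1:]))     # row t is [x_t, x_{t+1}], shape (3, 4)
x, x1 = np.split(pairs[0], 2)            # ll() recovers the two halves like this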
Example #7
def elbo(params, t):
    mean, log_std = params[0], params[1]
    samples = rs.randn(100, 1) * np.exp(log_std) + mean
    L = gaussian_entropy(log_std) + np.mean(logpx(samples))
    return -L


fig = plt.figure(figsize=(22, 8))
ax1 = fig.add_subplot(1, 2, 1)
ax2 = fig.add_subplot(1, 2, 2, projection='3d')
x = np.linspace(-2, 6, 20)
y = np.linspace(-10, 1.5, 20)
X, Y = np.meshgrid(x, y)
A = np.concatenate([np.atleast_2d(X.ravel()), np.atleast_2d(Y.ravel())]).T
zs = np.apply_along_axis(lambda x: elbo(x, 0), 1, A)
Z = zs.reshape(X.shape)


def callback(params, t, g):
    print("Iteration {0:} " \
          "lower bound {1:.4f}; " \
          "mean {2:.4f} [{3:.4f}]; " \
          "variance {4:.4f}[{5:.4f}]".format(
        t,
        -elbo(params, t),
        params[0],
        true_mean,
        np.exp(params[1]),
        true_std))
    ax1.clear()
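One plausible way to drive this example (not shown in the snippet) is autograd's Adam optimizer, which passes the iteration index to both the ELBO gradient and the callback. The starting point, step size, and iteration count below are illustrative only.

from autograd import grad
from autograd.misc.optimizers import adam

init_params = np.array([0.0, -5.0])          # illustrative [mean, log_std] start
variational_params = adam(grad(elbo), init_params,
                          step_size=0.1, num_iters=200, callback=callback)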
Example #8
def vectorize_if_needed(f, a, axis=-1):
    if a.ndim > 1:
        return np.apply_along_axis(f, axis, a)
    else:
        return f(a)
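A quick check of the dispatch: the same reducing function applied directly to a single vector and row-wise over a 2-D array. The names here are purely illustrative.

import numpy as np

norm_f = lambda v: np.sqrt(np.sum(v ** 2))                              # Euclidean norm of one vector
print(vectorize_if_needed(norm_f, np.array([3.0, 4.0])))                # 5.0
print(vectorize_if_needed(norm_f, np.array([[3.0, 4.0], [6.0, 8.0]])))  # [ 5. 10.]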
Example #9
    def sampleEmissions(self, x):
        # Sample from P( y | x, ϴ )
        assert x.ndim == 1
        return np.apply_along_axis(self.sampleSingleEmission, -1,
                                   x.reshape((-1, 1))).ravel()[None]
Example #10
    def sampleEmissions( self, x ):
        # Sample from P( y | x, ϴ )
        assert x.ndim == 2
        return np.apply_along_axis( self.sampleSingleEmission, -1, x )[ None ]
Example #11
def cost(pts, globalPts, startIdx, distObs, delta_t):
    # Endpoint cost
    posDiff = bspline(0, extractPts(pts,
                                    startIdx + 5)) - globalPts[startIdx + 5]
    velDiff = bsplineVel(0, extractPts(pts, startIdx + 5),
                         delta_t) - bsplineVel(
                             0, extractPts(globalPts, startIdx + 5), delta_t)
    E_ep = lambda_p * np.dot(posDiff, posDiff) + lambda_v * np.dot(
        velDiff, velDiff)

    # Collision cost
    u = np.linspace(0, 1, 5)
    samples = np.vstack((np.repeat(np.arange(6), len(u)), np.tile(u, 6)))

    def computeDist(sample):
        p = bspline(sample[1], extractPts(pts, sample[0] + startIdx))
        return distObs[np.clip(int(p[0]), 0, distObs.shape[0] - 1),
                       np.clip(int(p[1]), 0, distObs.shape[1] - 1)]

    distances = np.apply_along_axis(computeDist, 0, samples)
    mask = distances <= OBSTACLE_DISTANCE_THRESHOLD
    distances[mask] = np.square(distances[mask] - OBSTACLE_DISTANCE_THRESHOLD
                                ) / (2 * OBSTACLE_DISTANCE_THRESHOLD)
    distances[np.invert(mask)] = 0

    def computeVelocities(sample):
        p = bsplineVel(sample[1], extractPts(pts, sample[0] + startIdx),
                       delta_t)
        return norm(p)

    velocities = np.apply_along_axis(computeVelocities, 0, samples)
    E_c = lambda_c * np.sum(np.dot(distances, velocities)) / (len(u) * 6)

    # Squared derivative cost
    q2, q3, q4 = Q_2(delta_t), Q_3(delta_t), Q_4(delta_t)
    E_q = 0
    for i in range(6):
        A = np.dot(M_6, extractPts(pts, startIdx + i))
        B = A.T
        E_q = E_q + np.sum(lambda_q2 * np.dot(np.dot(B, q2), A) +
                           lambda_q3 * np.dot(np.dot(B, q3), A) +
                           lambda_q4 * np.dot(np.dot(B, q4), A))

    # Derivative limit cost
    max_vel, max_acc, max_jerk, max_snap = np.array([1000, 1000]), np.array(
        [1000, 1000]), np.array([1e10, 1e10]), np.array([1e10, 1e10])
    u = np.linspace(0, 1, 5)
    samples = np.vstack((np.repeat(np.arange(6), len(u)), np.tile(u, 6)))

    def derivativeCost(pFunc, max_p, delta_t):
        def f(sample):
            p = pFunc(sample[1], extractPts(pts, sample[0] + startIdx),
                      delta_t)
            norm_max = norm(max_p)
            norm_p = norm(p)
            return np.exp(norm_p - norm_max) - 1 if norm_p > norm_max else 0

        return f

    E_l = 0
    for sample in zip(samples[0], samples[1]):
        E_l = E_l + derivativeCost(bsplineVel, max_vel, delta_t)(sample)
        E_l = E_l + derivativeCost(bsplineAcc, max_acc, delta_t)(sample)
        E_l = E_l + derivativeCost(bsplineJerk, max_jerk, delta_t)(sample)
        E_l = E_l + derivativeCost(bsplineSnap, max_snap, delta_t)(sample)
    E_l = E_l / (len(u) * 6)

    # Total cost
    E = E_ep + E_c + E_q + E_l

    # if not isinstance(E_ep, autograd.numpy.numpy_boxes.ArrayBox):
    #     print('[{}] {} | {} | {} | {} => {}'.format(startIdx, E_ep, E_c, E_q, E_l, E))
    return E
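Given the ArrayBox check in the commented-out debug print, this cost is presumably meant to be differentiated with autograd. A heavily hedged sketch of that outer loop follows; pts0, globalPts, distObs, and dt are hypothetical placeholders, not names from the source.

from autograd import grad
from scipy.optimize import minimize

cost_grad = grad(cost, 0)                      # d cost / d control points
res = minimize(lambda p: cost(p, globalPts, 0, distObs, dt), pts0,
               jac=lambda p: cost_grad(p, globalPts, 0, distObs, dt),
               method='L-BFGS-B')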
Example #12
    def synthesize(self, alpha):
        alpha = np.array(alpha, ndmin=2)
        airfoils = np.apply_along_axis(
            lambda x: synthesize_ffd(x, self.airfoil0, self.m, self.n, self.Px),
            1, alpha)
        return np.squeeze(airfoils)
Example #13
    def __call__(self, x):
        x = np.array(x, ndmin=2)
        y = np.apply_along_axis(
            lambda xi: evaluate(self.synthesize(xi), self.config_fname), 1, x)
        self.y = np.squeeze(y)
        return self.y