Example #1
def empirical_risk(X, y, w):
    N = len(X)
    f_1 = (-y) * np.log(sigmoid_func(inner1d(w.T, X)[:, np.newaxis]))
    f_2 = (1 - y) * np.log(1 - sigmoid_func(inner1d(w.T, X)[:, np.newaxis]))
    f = (f_1 - f_2).sum(axis=0)
    empirical_error = f / N
    return empirical_error
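Note: inner1d comes from the private module numpy.core.umath_tests, which emits a DeprecationWarning since NumPy 1.15 and is gone from current releases. A minimal drop-in sketch of the same gufunc behavior (row-wise inner product over the last axis, with broadcasting):

import numpy as np

def inner1d(a, b):
    # Equivalent of numpy.core.umath_tests.inner1d (signature (i),(i)->()):
    # sum-product over the last axis, broadcasting over the leading axes.
    return np.einsum('...i,...i->...', a, b)

a = np.arange(6.0).reshape(2, 3)
b = np.ones((2, 3))
print(inner1d(a, b))           # [ 3. 12.]
print(np.sum(a * b, axis=-1))  # same result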
Example #2
 def test_broadcast(self):
     msg = "broadcast"
     a = np.arange(4).reshape((2, 1, 2))
     b = np.arange(4).reshape((1, 2, 2))
     assert_array_equal(umt.inner1d(a, b),
                        np.sum(a * b, axis=-1),
                        err_msg=msg)
     msg = "extend & broadcast loop dimensions"
     b = np.arange(4).reshape((2, 2))
     assert_array_equal(umt.inner1d(a, b),
                        np.sum(a * b, axis=-1),
                        err_msg=msg)
     msg = "broadcast in core dimensions"
     a = np.arange(8).reshape((4, 2))
     b = np.arange(4).reshape((4, 1))
     assert_array_equal(umt.inner1d(a, b),
                        np.sum(a * b, axis=-1),
                        err_msg=msg)
     msg = "extend & broadcast core and loop dimensions"
     a = np.arange(8).reshape((4, 2))
     b = np.array(7)
     assert_array_equal(umt.inner1d(a, b),
                        np.sum(a * b, axis=-1),
                        err_msg=msg)
     msg = "broadcast should fail"
     a = np.arange(2).reshape((2, 1, 1))
     b = np.arange(3).reshape((3, 1, 1))
     try:
         ret = umt.inner1d(a, b)
         assert_equal(ret, None, err_msg=msg)
     except ValueError:
         pass
Example #3
def runTests(source, target):
    
    if check(source, target):
        return runTests(target, source)
    
    test = pd.read_csv(os.path.join('/scratch', 'ah3243', 'content_test.csv'))
    train = loadData(source, target)
    
    model = {}
    path = os.path.join('/scratch', 'dev241', 'capstone', 'fast')
    
    model[source] = fasttext.load_model(os.path.join(path, 'wiki.{}.bin'.format(source)))
    model[target] = fasttext.load_model(os.path.join(path, 'wiki.{}.bin'.format(target)))
    
    bilingual_dictionary = list(zip(train['source'],train['target']))
    
    source_matrix, target_matrix = make_training_matrices(model[source], model[target], bilingual_dictionary)
    
    transform = learn_transformation(source_matrix, target_matrix)
    
    print("Before trans:", np.mean(inner1d(target_matrix, source_matrix)))
    
    print("After trans:", np.mean(inner1d(normalized(target_matrix), np.matmul(transform, normalized(source_matrix).T).T)))
    
    bilingual_dictionary = list(zip(test['source'],test['target']))

    source_matrix_test, target_matrix_test = make_training_matrices(model[source], model[target], bilingual_dictionary)
    
    
    target_matrix_test = normalized(target_matrix_test)
    source_matrix_test = normalized(source_matrix_test)
    
    print("Before trans:",np.mean(inner1d(target_matrix_test, source_matrix_test)))
    #after
    print("After trans:", np.mean(inner1d(target_matrix_test, np.matmul(transform, source_matrix_test.T).T)))
Example #4
 def test_endian(self):
     msg = "big endian"
     a = np.arange(6, dtype='>i4').reshape((2,3))
     assert_array_equal(umt.inner1d(a,a), np.sum(a*a,axis=-1), err_msg=msg)
     msg = "little endian"
     a = np.arange(6, dtype='<i4').reshape((2,3))
     assert_array_equal(umt.inner1d(a,a), np.sum(a*a,axis=-1), err_msg=msg)
Example #5
    def diffract_photons(self, photons, intersect, interpos, intercoos):
        '''Vectorized implementation'''
        p = norm_vector(photons['dir'].data[intersect])
        l, d, n = self.e_groove_coos(intercoos[intersect])
        # Minus sign here because we want n, l, d to be a right-handed coordinate system
        d = - d

        wave = energy2wave / photons['energy'].data[intersect]
        # calculate angle between normal and (ray projected in plane perpendicular to groove)
        # -> this is the blaze angle
        p_perp_to_grooves = norm_vector(p - inner1d(p, l)[:, None] * l)
        # Use abs here so that blaze angle is always in 0..pi/2
        # independent of the relative orientation of p and n.
        blazeangle = np.arccos(np.abs(inner1d(p_perp_to_grooves, n)))
        blazeangle += self.blaze_angle_modifier(intercoos[intersect, :])
        m, prob = self.order_selector(photons['energy'].data[intersect],
                                      photons['polarization'].data[intersect],
                                      blazeangle)

        # The idea to calculate the components in the (d,l,n) system separately
        # is taken from MARX
        sign = self.order_sign_convention(p, d)
        p_d = inner1d(p, d) + sign * m * wave / self.d(intercoos[intersect, :])
        p_l = inner1d(p, l)
        # The norm for p_n can be derived, but the direction needs to be chosen.
        p_n = np.sqrt(1. - p_d**2 - p_l**2)
        # Check if the photons have same direction compared to normal before
        direction = np.sign(inner1d(p, n)).astype(np.float64)
        if not self.transmission:
            direction *= -1
        dir = p_d[:, None] * d + p_l[:, None] * l + (direction * p_n)[:, None] * n
        return dir, m, prob, blazeangle
Example #6
def HausdorffDist(A, B):
    # Hausdorff Distance: Compute the Hausdorff distance between two point
    # clouds.
    # Let A and B be subsets of a metric space (Z, dZ).
    # The Hausdorff distance between A and B, denoted by dH(A,B),
    # is defined by:
    # dH(A,B) = max(h(A,B), h(B,A)),
    # where h(A,B) = max over a in A of (min over b in B of d(a,b)),
    # and d(a,b) is the L2 norm.
    # dist_H = hausdorff(A,B)
    # A: First point set (MxN, with M observations in N dimensions)
    # B: Second point set (MxN, with M observations in N dimensions)
    # ** A and B may have different numbers of rows, but must have the same
    # number of columns.
    #
    # Edward DongBo Cui; Stanford University; 06/17/2014

    # Find pairwise distance
    D_mat = np.sqrt(
        inner1d(A, A)[np.newaxis].T + inner1d(B, B) - 2 * (np.dot(A, B.T)))
    # Find DH
    dH = np.max(
        np.array(
            [np.max(np.min(D_mat, axis=0)),
             np.max(np.min(D_mat, axis=1))]))
    return (dH)
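A quick smoke test for HausdorffDist (illustrative only; assumes an inner1d such as the sketch under Example #1 is in scope): two jittered copies of the same cloud should give a dH on the order of the jitter.

import numpy as np

rng = np.random.default_rng(0)
A = rng.random((100, 3))
B = A + 0.01 * rng.standard_normal((100, 3))  # jittered copy of A
print(HausdorffDist(A, B))  # small value, roughly the jitter scale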
Example #7
 def f(L, x, bbt, bE, EtE):
     Lx = np.matmul(L, x)
     y = bbt - 2 * np.sum(inner1d(bE, Lx)) + np.sum(
         inner1d(Lx, np.matmul(Lx, EtE)))
     # y = bbt - 2 *  np.einsum('ii', np.einsum('ij,kj->ik', Lx, bE))\
     #     + np.einsum('ii', np.einsum('ij,kj->ik', np.matmul(Lx, EtE), Lx))
     return 0.5 * y
Example #8
    def predict_batch_XY(self, X, Y):
        """
        params
        X: sequence of length n consisting of xs; [x_i]
            ... observed data on X
        Y: sequence of length n consisting of ys; [y_i]
            ... observed data on Y

        returns
        scores: length-n array of PHSIC scores, one per pair (x_i, y_i)
        """

        print('phsic.predict_batch_XY()...', file=sys.stderr)

        A_new = np.array([
            phsic.kernel.icd_for_new_data(self.A, self.pivot_xids,
                                          self.pivot_xs, self.k, x) for x in X
        ])
        B_new = np.array([
            phsic.kernel.icd_for_new_data(self.B, self.pivot_yids,
                                          self.pivot_ys, self.l, y) for y in Y
        ])

        if self.no_centering:
            return inner1d(A_new, (self.C_ICD_NC @ B_new.T).T)
        else:
            return inner1d(A_new - self.a_mean,
                           (self.C_ICD @ (B_new - self.b_mean).T).T)
Example #9
def get_moments(images, sumsx, sumsy, sumsz, totalE, m):
    ecal_size = 25
    totalE = np.squeeze(totalE)
    index = images.shape[0]
    momentX = np.zeros((index, m))
    momentY = np.zeros((index, m))
    momentZ = np.zeros((index, m))
    ECAL_midX = np.zeros(index)
    ECAL_midY = np.zeros(index)
    ECAL_midZ = np.zeros(index)
    if (totalE == 0).any(): return momentX, momentY, momentZ
    for i in range(m):
        relativeIndices = np.tile(np.arange(ecal_size), (index, 1))
        moments = np.power(
            (relativeIndices.transpose() - ECAL_midX).transpose(), i + 1)
        ECAL_momentX = umath.inner1d(sumsx, moments) / totalE
        if i == 0: ECAL_midX = ECAL_momentX.transpose()
        momentX[:, i] = ECAL_momentX
    for i in range(m):
        relativeIndices = np.tile(np.arange(ecal_size), (index, 1))
        moments = np.power(
            (relativeIndices.transpose() - ECAL_midY).transpose(), i + 1)
        ECAL_momentY = umath.inner1d(sumsy, moments) / totalE
        if i == 0: ECAL_midY = ECAL_momentY.transpose()
        momentY[:, i] = ECAL_momentY
    for i in range(m):
        relativeIndices = np.tile(np.arange(ecal_size), (index, 1))
        moments = np.power(
            (relativeIndices.transpose() - ECAL_midZ).transpose(), i + 1)
        ECAL_momentZ = umath.inner1d(sumsz, moments) / totalE
        if i == 0: ECAL_midZ = ECAL_momentZ.transpose()
        momentZ[:, i] = ECAL_momentZ
    return momentX, momentY, momentZ
Example #10
 def test_endian(self):
     msg = "big endian"
     a = np.arange(6, dtype='>i4').reshape((2,3))
     assert_array_equal(umt.inner1d(a,a), np.sum(a*a,axis=-1), err_msg=msg)
     msg = "little endian"
     a = np.arange(6, dtype='<i4').reshape((2,3))
     assert_array_equal(umt.inner1d(a,a), np.sum(a*a,axis=-1), err_msg=msg)
Example #11
    def SetVolumeCamera(self, pubsub_evt):
        if self.camera_state:
            #TODO: exclude dependency on initial focus
            cam_focus = np.array(bases.flip_x(pubsub_evt.data))
            cam = self.ren.GetActiveCamera()

            if self.initial_focus is None:
                self.initial_focus = np.array(cam.GetFocalPoint())

            cam_pos0 = np.array(cam.GetPosition())
            cam_focus0 = np.array(cam.GetFocalPoint())

            v0 = cam_pos0 - cam_focus0
            v0n = np.sqrt(inner1d(v0, v0))

            v1 = (cam_focus - self.initial_focus)
            v1n = np.sqrt(inner1d(v1, v1))
            if not v1n:
                v1n = 1.0
            cam_pos = (v1 / v1n) * v0n + cam_focus

            cam.SetFocalPoint(cam_focus)
            cam.SetPosition(cam_pos)

        # It works without doing the reset. Check with trackers if there is any difference.
        # Need to be outside condition for sphere marker position update
        # self.ren.ResetCameraClippingRange()
        # self.ren.ResetCamera()
        self.interactor.Render()
Example #12
    def grad_power_noise(x):
        """
        Compute the gradient of the power criterion with respect to the width of Gaussian
        RBF kernel and the noise vector.

        Args:
            x: 1 + 2J*d_n vector
        Returns:
            the gradient of the power criterion with respect to kernel width/latent vector
        """

        with util.ContextTimer() as t:
            width, z = unflatten(x)
            zp = z[:J]
            zq = z[J:]

            # Compute the Jacobian of the generators with respect to noise vector
            torch_zp = to_torch_variable(zp, shape=(-1, zp.shape[1], 1, 1),
                                         requires_grad=True)
            torch_zq = to_torch_variable(zq, shape=(-1, zq.shape[1], 1, 1),
                                         requires_grad=True)
            gp_grad = compute_jacobian(torch_zp, gen_p(torch_zp).view(J, -1))  # J x d_pix x d_noise x 1 x 1
            gq_grad = compute_jacobian(torch_zq, gen_q(torch_zq).view(J, -1))  # J x d_pix x d_noise x 1 x 1
            v_grad_z = np.vstack([gp_grad, gq_grad])
            v_grad_z = np.squeeze(v_grad_z, [3, 4])  # 2J x d_pix x d_noise
            
            # Compute the Jacobian of the feature extractor with respect to noise vector
            vp_flatten = to_torch_variable(
                gen_p(torch_zp).view(J, -1).cpu().data.numpy(),
                shape=(J, 3, image_size, image_size),
                requires_grad=True
            )
            vq_flatten = to_torch_variable(
                gen_q(torch_zq).view(J, -1).cpu().data.numpy(),
                shape=(J, 3, image_size, image_size),
                requires_grad=True
            )
            size = (model_input_size, model_input_size)
            upsample = nn.Upsample(size=size, mode='bilinear')
            fp = model(upsample(vp_flatten))
            fq = model(upsample(vq_flatten))
            fp_grad = compute_jacobian(vp_flatten, fp.view(J, -1))  # J x d_nn x C x H x W
            fq_grad = compute_jacobian(vq_flatten, fq.view(J, -1))  # J x d_nn x C x H x W
            f_grad_v = np.vstack([fp_grad, fq_grad])
            f_grad_v = f_grad_v.reshape((2*J, f_grad_v.shape[1], -1))  # 2J x d_nn x d_pix

            # Compute the gradient of the objective function with respect to
            # the gaussian width and test locations
            F = np.vstack([fp.cpu().data.numpy(), fq.cpu().data.numpy()])
            F = np.reshape(F, (2*J, -1))
            grad_obj = autograd.elementwise_grad(flat_obj_feat)  # 1+(2J)*d_nn input
            obj_grad_f = grad_obj(flatten(width, F))
            obj_grad_width = obj_grad_f[0]
            obj_grad_f = np.reshape(obj_grad_f[1:], [(2*J), -1])  # 2J x d_nn array

            obj_grad_v = inner1d(obj_grad_f, np.transpose(f_grad_v, (2, 0, 1)))  # 2J x d_pix
            obj_grad_z = inner1d(obj_grad_v.T, np.transpose(v_grad_z, (2, 0, 1))).flatten()

        return np.concatenate([obj_grad_width.reshape([1]), obj_grad_z]) 
Example #13
 def test_type_cast(self):
     msg = "type cast"
     a = np.arange(6, dtype="short").reshape((2, 3))
     assert_array_equal(umt.inner1d(a, a), np.sum(a * a, axis=-1), err_msg=msg)
     msg = "type cast on one argument"
     a = np.arange(6).reshape((2, 3))
     b = a + 0.1
     assert_array_almost_equal(umt.inner1d(a, a), np.sum(a * a, axis=-1), err_msg=msg)
Example #14
 def HausdorffDist(A, B):
     D_mat = np.sqrt(
         inner1d(A, A)[np.newaxis].T + inner1d(B, B) - 2 * (np.dot(A, B.T)))
     dH = np.max(
         np.array(
             [np.max(np.min(D_mat, axis=0)),
              np.max(np.min(D_mat, axis=1))]))
     return (dH)
Example #15
def vector_angles(X, Y):
    value = np.degrees(
        np.arccos(
            np.clip(inner1d(X, Y) /
                    (np.sqrt(inner1d(X, X)) * np.sqrt(inner1d(Y, Y))),
                    a_min=-1,
                    a_max=1)))
    return value
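Example usage of vector_angles (illustrative; relies on an inner1d implementation being in scope):

import numpy as np

X = np.array([[1.0, 0.0, 0.0], [1.0, 1.0, 0.0]])
Y = np.array([[0.0, 1.0, 0.0], [1.0, 0.0, 0.0]])
print(vector_angles(X, Y))  # -> [90. 45.]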
Example #16
 def ModHausdorffDist(A, B):
     D_mat = np.sqrt(
         inner1d(A, A)[np.newaxis].T + inner1d(B, B) - 2 *
         (np.dot(A, B.T)))
     FHD = np.mean(np.min(D_mat, axis=1))
     RHD = np.mean(np.min(D_mat, axis=0))
     MHD = np.max(np.array([FHD, RHD]))
     return MHD
Example #17
    def boundedness_selection(self, lagrangecut = 0.9):
        """get a subset of stars that are probably bound.
        
        Arguments:
        lagrangecut -- lagrangian radius outside which stars may be escapers. default: 0.9
        
        Returns:
        Nbody6Subset containing the stars that are probably bound.    
        """
        """# everything here is using the new density center as the origin.
        # if lagrangian radii aren't calculated, get the desired one.
        if self.Lagr_fractions is None:
            self.calc_lagrangian_radii([lagrangecut])
            
        # if the desired cut isn't in the already calculated ones, get it
        if lagrangecut not in self.Lagr_fractions:
            newfracs = np.hstack((self.Lagr_fractions, lagrangecut))
            newfracs.sort()
            self.calc_lagrangian_radii(newfracs)
        """
        self.calc_lagrangian_radii([lagrangecut])
        # get the desired lagrangian radius
        #rcut = self.Lagr_rads[np.where(self.Lagr_fractions == lagrangecut)]
        rcut = self.Lagr_rads[0]
        # get stars inside the cutoff
        selection = (self.Radii_dcm_new <= rcut)
        # and outside
        outside = (self.Radii_dcm_new > rcut)

        # get stars with positive v.r
        vdotr = inner1d(self.Pos - self.dc_pos_new, 
                        self.Vel - self.dc_vel_new)
        streamers = (vdotr > 0)
       
        
        # safe ones: inside or outside and not streaming
        selection = ~outside | ~(outside & streamers)
        possibles = outside & streamers
        possiblenames = self.Names[possibles]    
                
                
        # get velocities of stars outside the cut
        velrels = self.Vel[possibles] - self.dc_vel_new
        vel2 = np.array(inner1d(velrels, velrels))
        # get squared escape velocity from the masscut at each radius in nbody units
        vesc2 = 2 * self.Masses.sum() * lagrangecut / self.Radii_dcm_new[possibles]
        keepers = (vel2 < vesc2)
        keepnames = possiblenames[keepers] 
        selectionnames = np.vstack((self.Names[selection], keepnames))    
            
        # update radii from density center
        poscm = self.Pos - self.dc_pos
        self.Radii_dcm_new = np.array(np.sqrt(inner1d(poscm, poscm)))  
        if selection.sum() == 0:
            print('No stars are in the sphere!')
            return None
        else:
            return Nbody6Subset(self, selectionnames) 
Example #18
 def test_type_cast(self):
     msg = "type cast"
     a = np.arange(6, dtype='short').reshape((2,3))
     assert_array_equal(umt.inner1d(a,a), np.sum(a*a,axis=-1), err_msg=msg)
     msg = "type cast on one argument"
     a = np.arange(6).reshape((2,3))
     b = a+0.1
     assert_array_almost_equal(umt.inner1d(a,a), np.sum(a*a,axis=-1),
         err_msg=msg)
Example #19
def wfs_25d_plane(x0, n0, n=[0, 1, 0], xref=[0, 0, 0], c=None):
    r"""Plane wave model by 2.5-dimensional WFS.

    Parameters
    ----------
    x0 : (N, 3) array_like
        Sequence of secondary source positions.
    n0 : (N, 3) array_like
        Sequence of secondary source orientations.
    n : (3,) array_like, optional
        Normal vector (propagation direction) of synthesized plane wave.
    xref : (3,) array_like, optional
        Reference position
    c : float, optional
        Speed of sound

    Returns
    -------
    delays : (N,) numpy.ndarray
        Delays of secondary sources in seconds.
    weights: (N,) numpy.ndarray
        Weights of secondary sources.

    Notes
    -----
    2.5D correction factor

    .. math::

        g_0 = \sqrt{2 \pi |x_\mathrm{ref} - x_0|}

    d using a plane wave as source model

    .. math::

        d_{2.5D}(x_0,t) = h(t)
        2 g_0 \scalarprod{n}{n_0}
        \dirac{t - \frac{1}{c} \scalarprod{n}{x_0}}

    with wfs(2.5D) prefilter h(t), which is not implemented yet.

    References
    ----------
    See http://sfstoolbox.org/en/latest/#equation-d.wfs.pw.2.5D

    """
    if c is None:
        c = defs.c
    x0 = util.asarray_of_rows(x0)
    n0 = util.asarray_of_rows(n0)
    n = util.asarray_1d(n)
    xref = util.asarray_1d(xref)
    g0 = np.sqrt(2 * np.pi * np.linalg.norm(xref - x0, axis=1))
    delays = inner1d(n, x0) / c
    weights = 2 * g0 * inner1d(n, n0)
    return delays, weights
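A plausibility check for wfs_25d_plane in plain NumPy, bypassing the toolbox's util/defs helpers (c = 343 m/s assumed here): for a plane wave travelling broadside to a linear array, all delays are zero and the weights reduce to 2 * g0.

import numpy as np

c = 343.0
x0 = np.array([[-0.5, 0.0, 0.0], [0.0, 0.0, 0.0], [0.5, 0.0, 0.0]])  # line array on the x-axis
n0 = np.tile([0.0, 1.0, 0.0], (3, 1))  # all secondary sources face +y
n = np.array([0.0, 1.0, 0.0])          # plane wave propagating along +y
xref = np.array([0.0, 1.0, 0.0])

g0 = np.sqrt(2 * np.pi * np.linalg.norm(xref - x0, axis=1))
delays = x0 @ n / c          # inner1d(n, x0) via broadcasting: all zero here
weights = 2 * g0 * (n0 @ n)  # inner1d(n, n0): reduces to 2 * g0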
Example #20
def intersection(A, B, C, D):
    """
    Point of intersection between two great circle arcs.
    source: http://ssb.stsci.edu/doc/stsci_python_x/stsci.sphere.doc/html/_modules/stsci/sphere/great_circle_arc.html

    Parameters
    ----------
    A, B : (*x*, *y*, *z*) triples or Nx3 arrays of triples
        Endpoints of the first great circle arc.
    C, D : (*x*, *y*, *z*) triples or Nx3 arrays of triples
        Endpoints of the second great circle arc.

    Returns
    -------
    T : (*x*, *y*, *z*) triples or Nx3 arrays of triples
        If the given arcs intersect, the intersection is returned. If the arcs do not intersect,
        the triple is set to all NaNs.
    """
    A = np.asanyarray(A)
    B = np.asanyarray(B)
    C = np.asanyarray(C)
    D = np.asanyarray(D)

    A, B = np.broadcast_arrays(A, B)
    C, D = np.broadcast_arrays(C, D)

    ABX = fast_cross(A, B)
    CDX = fast_cross(C, D)
    T = cross_and_normalize(ABX, CDX)
    T_ndim = len(T.shape)

    if T_ndim > 1:
        s = np.zeros(T.shape[0])
    else:
        s = np.zeros(1)
    s += np.sign(inner1d(fast_cross(ABX, A), T))
    s += np.sign(inner1d(fast_cross(B, ABX), T))
    s += np.sign(inner1d(fast_cross(CDX, C), T))
    s += np.sign(inner1d(fast_cross(D, CDX), T))
    if T_ndim > 1:
        s = np.expand_dims(s, -1)

    cross = np.where(s == -4, -T, np.where(s == 4, T, np.nan))

    # If they share a common point, it's not an intersection.  This
    # gets around some rounding-error/numerical problems with the
    # above.
    equals = (np.all(A == C, axis=-1) | np.all(A == D, axis=-1)
              | np.all(B == C, axis=-1) | np.all(B == D, axis=-1))

    equals = np.expand_dims(equals, -1)

    T = np.where(equals, np.nan, cross)

    return T
Example #21
def intersection(A: np.ndarray, B: np.ndarray, C: tuple, D: tuple):
    """
    Returns the point(s) of intersection between two great circle arcs.
    The arcs are defined between the points *AB* and *CD*.  Either *A*
    and *B* or *C* and *D* may be arrays of points, but not both.

    :param A: An array of Nx3 dimension including x,y,z tuples describing
        a path on a sphere
    :type A: numpy.ndarray
    :param B: Same as A
    :type B: numpy.ndarray
    :param C: A tuple with (x, y, z) coordinates on a unit sphere
    :type C: tuple
    :param D: Same as C
    :type D: tuple
    """

    A = np.asanyarray(A)
    B = np.asanyarray(B)
    C = np.asanyarray(C)
    D = np.asanyarray(D)

    A, B = np.broadcast_arrays(A, B)
    C, D = np.broadcast_arrays(C, D)

    ABX = np.cross(A, B)
    CDX = np.cross(C, D)
    T = _cross_and_normalize(ABX, CDX)
    T_ndim = len(T.shape)

    if T_ndim > 1:
        s = np.zeros(T.shape[0])
    else:
        s = np.zeros(1)
    s += np.sign(inner1d(np.cross(ABX, A), T))
    s += np.sign(inner1d(np.cross(B, ABX), T))
    s += np.sign(inner1d(np.cross(CDX, C), T))
    s += np.sign(inner1d(np.cross(D, CDX), T))
    if T_ndim > 1:
        s = two_d(s)

    cross = np.where(s == -4, -T, np.where(s == 4, T, np.nan))

    # If they share a common point, it's not an intersection.  This
    # gets around some rounding-error/numerical problems with the
    # above.
    equals = (np.all(A == C, axis=-1)
              | np.all(A == D, axis=-1)
              | np.all(B == C, axis=-1)
              | np.all(B == D, axis=-1))

    equals = two_d(equals)

    return np.where(equals, np.nan, cross)
Example #22
 def fit(self, X):
     
     self.n_data, self.n_features  = X.shape
     
     if self.initializeMethod == "Random":
         self.mean_vectors = np.random.uniform(0, 1, size=[self.n_clusters, self.n_features])
         # cluster index first, so covariance_matrices[i] is one (n_features, n_features) matrix
         self.covariance_matrices = np.random.uniform(0, 1, size=[self.n_clusters, self.n_features, self.n_features])
         self.alpha_vectors = np.full(self.n_clusters, 1.0 / self.n_clusters)
     
     self.log_likelihood_values = []
     for iteration in range(0,self.max_iterations):
         ### Expectation Step
         for i in range(0, self.n_clusters):
             temp_likelihood = inner1d(((X - self.mean_vectors[i]).dot(np.linalg.inv(self.covariance_matrices[i]))), ((X - self.mean_vectors[i])))
             likelihood = np.exp(-temp_likelihood/2.0)
             likelihood = likelihood / (((2 * math.pi)**self.n_features * abs(np.linalg.det(self.covariance_matrices[i])))**0.5)  # (2*pi)^(d/2) * |Sigma|^(1/2)
             e_step = self.alpha_vectors[i] * likelihood
             if i==0:
                 expectation = e_step
             else:
                 expectation = np.vstack([expectation,e_step])
         expectation = expectation /  np.sum(expectation, axis=0)
            
         ### Maximization step (update parameters)
         for i in range(0, self.n_clusters):
             ### update mean vector
             mean_temp = np.multiply(X.T, expectation[i]).T
             self.mean_vectors[i,:] = np.sum(mean_temp, axis=0) / np.sum(expectation[i])
             ### update covariance matrix
             covariance_temp1 = np.einsum('ij,kj->jik',(X - self.mean_vectors[i]).T,(X - self.mean_vectors[i]).T)
             covariance_temp2 = np.multiply(covariance_temp1.T, expectation[i]).T
             self.covariance_matrices[i,:,:] = np.sum(covariance_temp2, axis=0) / np.sum(expectation[i])
             ### update alpha vector
             self.alpha_vectors[i] = np.sum(expectation[i]) /  self.n_data
          
         ### evaluate log likelihood
         for i in range(0, self.n_clusters):
             temp_likelihood2 = inner1d(((X - self.mean_vectors[i]).dot(np.linalg.inv(self.covariance_matrices[i]))), ((X - self.mean_vectors[i])))
             likelihood2 = np.exp(-temp_likelihood2/2.0)
             likelihood2 = likelihood2 / (((2 * math.pi)**self.n_features * abs(np.linalg.det(self.covariance_matrices[i])))**0.5)  # (2*pi)^(d/2) * |Sigma|^(1/2)
             e_step2 = self.alpha_vectors[i] * likelihood2
             if i==0:
                 expectation2 = e_step2
             else:
                 expectation2 = np.vstack([expectation2,e_step2])
         log_likelihood = np.sum(expectation2, axis=0)
         log_likelihood = np.sum(np.log(log_likelihood))
         self.log_likelihood_values.append(log_likelihood)
         if iteration > 0:
             if self.log_likelihood_values[iteration] == self.log_likelihood_values[iteration-1]:
                 print("Converged at iteration ", iteration)
                 break
         ### assign cluster values
         self.labels_ = np.argmax(expectation2,axis=0)
Example #23
def HausdorffDist(A, B):

    # Find pairwise distance
    D_mat = np.sqrt(
        inner1d(A, A)[np.newaxis].T + inner1d(B, B) - 2 * (np.dot(A, B.T)))
    # Find DH
    dH = np.max(
        np.array(
            [np.max(np.min(D_mat, axis=0)),
             np.max(np.min(D_mat, axis=1))]))
    return (dH)
Example #24
    def predict_batch_training_data(self):
        print('phsic.predict_batch_training_data()...', file=sys.stderr)

        A_new = self.A
        B_new = self.B

        if self.no_centering:
            return inner1d(A_new, (self.C_ICD_NC @ B_new.T).T)
        else:
            return inner1d(A_new - self.a_mean,
                           (self.C_ICD @ (B_new - self.b_mean).T).T)
Example #25
def _haus_dist_95(A, B):
    """ compute the 95 percentile hausdorff distance """
    # Find pairwise distance
    D_mat = np.sqrt(
        inner1d(A, A)[np.newaxis].T + inner1d(B, B) - 2 * (np.dot(A, B.T)))
    dist1 = np.min(D_mat, axis=0)
    dist2 = np.min(D_mat, axis=1)
    hd95 = np.percentile(np.hstack((dist1, dist2)), 95)

    # hd = np.max(np.array([np.max(np.min(D_mat, axis=0)), np.max(np.min(D_mat, axis=1))]))

    return hd95
Example #26
    def velocity_correlation(self, t_i, t_f, t_c=0.5):
        """Calculate translational and rotation velocity correlaiton.
        
        Parameters
        ----------
        t_i : float
        t_f : float
            t_i, t_f -> Start, end time of trajectory
        t_c : float
            Correlation time.

        Returns
        -------
        t_vec : float[:], shape = len(corr)
        trn_corr_mat : float[:,:], shape = (len(corr), num_mol)
        rot_corr_mat : float[:,:], shape = (len(corr), num_mol)
        """
        if t_c*2 > t_f - t_i:
            print("Correlation time is too long. Maximum: half of trajectory.")
            exit(1)

        dt = self._universe.trajectory[1].time - self._universe.trajectory[0].time
        t_vec = np.arange(0, t_c*1.0001, dt)
        frame_i = int(t_i/dt)
        frame_f = int(t_f/dt)
        num_frame = frame_f - frame_i + 1

        num_mol = int(self._num_atom/3)
        vel_trn_mat3 = np.zeros((num_frame, num_mol, 3))
        vel_rot_mat3 = np.zeros_like(vel_trn_mat3)

        trn_corr_mat = np.zeros((len(t_vec), num_mol))
        rot_corr_mat = np.zeros_like(trn_corr_mat)

        for i in range(num_frame):
            ts = self._universe.trajectory[frame_i + i]
            box_vec = ts.dimensions
            pos_atom_mat = self._atom_vec.positions
            vel_atom_mat = self._atom_vec.velocities
            vel_trn_mat3[i], vel_rot_mat3[i], I_mat = self._decompose_velocity(pos_atom_mat, vel_atom_mat, box_vec)

        for i in range(len(t_vec)):
            vel_trn_0 = vel_trn_mat3[0:-1-i].reshape((-1,3))
            vel_trn_t = vel_trn_mat3[i:-1].reshape((-1,3))
            trn_corr_mat[i] = self._mass_h2o*np.mean(inner1d(vel_trn_0, vel_trn_t).reshape((-1, num_mol)), axis=0)

        I_vec = np.array([I_mat[0,0], I_mat[1,1], I_mat[2,2]])
        for i in range(len(t_vec)):
            vel_rot_0 = I_vec*vel_rot_mat3[0:-1-i].reshape((-1,3))
            vel_rot_t = vel_rot_mat3[i:-1].reshape((-1,3))
            rot_corr_mat[i] = np.mean(inner1d(vel_rot_0, vel_rot_t).reshape((-1,num_mol)), axis=0)

        return(t_vec, trn_corr_mat, rot_corr_mat)
Example #27
def mod_Hausdorff_distance(model_set, test_set):

    # Find pairwise distance
    D_mat = numpy.sqrt(inner1d(model_set, model_set)[numpy.newaxis].T + inner1d(
        test_set, test_set) - 2 * (numpy.dot(model_set, test_set.T)))
    # Calculating the forward HD: mean(min(each col))
    FHD = numpy.mean(numpy.min(D_mat, axis=1))
    # Calculating the reverse HD: mean(min(each row))
    RHD = numpy.mean(numpy.min(D_mat, axis=0))
    # Calculating mhd
    MHD = max(FHD, RHD)
    return MHD
Example #28
    def ae_gradient_1s_y(self, X_in, y, kernel='exp'):
        if kernel == 'exp':
            point = array(X_in)
            dif_vectors = self.setN - point
            dif_and_varianced = array(matrix(dif_vectors) * self.sv_kernel)
            dif_traces = inner1d(dif_and_varianced, dif_vectors)
            weights = exp(-0.5 * self.__S * dif_traces)

            origin_up = (self.Y * (matrix(self.percentages * weights).T))[0, 0]
            origin_down = inner(self.percentages, weights)
            delta_up = (
                self.Y *
                (matrix(self.percentages * weights * array(dif_traces)).T))[0,
                                                                            0]
            delta_down = inner(self.percentages, dif_traces * weights)
            #print origin_up,'up'
            #print dif_traces
            #print origin_down
            #print delta_up
            #print delta_down
            results = (self.Y *
                       (matrix(self.percentages * weights).T)) / (inner(
                           self.percentages, weights))
            Y_pred = array(results.T)[0]
            #print Y,Y_pred
            gradient = (y - Y_pred) * (delta_up * origin_down -
                                       origin_up * delta_down) / (
                                           (origin_down)**2.0)
            #print gradient
            return gradient[0]
        elif kernel == 'rec':
            point = array(X_in)
            dif_vectors = self.setN - point
            dif_and_varianced = array(matrix(dif_vectors) * self.sv_kernel)
            dif_traces = inner1d(dif_and_varianced, dif_vectors)
            weights = self.__recip_kernel(dif_traces, self.__S)
            dif_weights = self.__recip_kernel_dif(dif_traces, self.__S)
            origin_up = (self.Y * (matrix(self.percentages * weights).T))[0, 0]
            origin_down = inner(self.percentages, weights)
            D_up = (self.Y * (matrix(self.percentages * dif_weights).T))[0, 0]
            D_down = inner(self.percentages, dif_weights)
            Y_pred = origin_up / origin_down
            gradient = 2 * (y - Y_pred) * (
                D_up * origin_down - origin_up * D_down) / ((origin_down)**2.0)
            return gradient
        else:

            print('No kernel specified...')
            return 0
Example #29
    def detect_collision(self, cub1, cub2):
        # Calculate all 15 normals
        normals = self.get_normals(cub1, cub2)
        for normal in normals:
            # Calculate projections
            projects1 = inner1d(normal, cub1.vertices)
            projects2 = inner1d(normal, cub2.vertices)
            
            # Gap detected
            if np.max(projects1) < np.min(projects2) or \
               np.max(projects2) < np.min(projects1): return False

        return True
Example #30
    def compute(self, nbs, dt, buf_items, buf_pairs):
        F = buf_items
        F.fill(0)
        
        dPcorr = np.zeros_like(F)

        # detect collisions
        overlaps, b1_idx, b2_idx = nbs.overlapping_pairs

        if len(overlaps):
            vdv = inner1d(nbs.V[b1_idx], nbs.V[b2_idx])
            approaching = vdv < -0.01
            
            if np.sum(approaching):
                hits = overlaps[approaching]
                b1h_idx = b1_idx[approaching]
                b2h_idx = b2_idx[approaching]

                # unit collision vector
                dPh = nbs.unit_p1p2_dense[hits]

                # masses and velocities of colliding pairs
                M1h = nbs.M[b1h_idx]
                M2h = nbs.M[b2h_idx]
                V1h = nbs.V[b1h_idx]
                V2h = nbs.V[b2h_idx]

                # project V1 and V2 onto dP
                bdb = inner1d(dPh, dPh)
                V1p = (inner1d(V1h, dPh) / bdb)[:, np.newaxis] * dPh
                V2p = (inner1d(V2h, dPh) / bdb)[:, np.newaxis] * dPh

                # orthogonal component sticks around
                V1o = V1h - V1p
                V2o = V2h - V2p

                # new velocities after collision
                V1f = ((M1h - M2h) / (M1h + M2h))[:, np.newaxis] * V1p  +  (2 * M2h / (M1h + M2h))[:, np.newaxis] * V2p
                V2f = (2 * M1h / (M1h + M2h))[:, np.newaxis] * V1p  -  ((M1h - M2h) / (M1h + M2h))[:, np.newaxis] * V2p 

                F[b1h_idx,:] += M1h[:, np.newaxis] * ((V1f + V1o) - V1h) / dt
                F[b2h_idx,:] += M2h[:, np.newaxis] * ((V2f + V2o) - V2h) / dt

            dPh = nbs.unit_p1p2_dense[overlaps]
            hdr = 0.5 * (nbs.pdist_dense[overlaps] - nbs.r1r2_dense[overlaps])[:,np.newaxis]
                
            Mr = (nbs.M[b1_idx] / (nbs.M[b1_idx] + nbs.M[b2_idx]))[:,np.newaxis]
            dPcorr[b1_idx,:] -= Mr * dPh * hdr
            dPcorr[b2_idx,:] += Mr * dPh * hdr

        return F, dPcorr
Example #31
def HausdorffDist(A, B):
    D_mat = np.sqrt(
        inner1d(A, A)[np.newaxis].T + inner1d(B, B) - 2 * (np.dot(A, B.T)))
    dH = np.max(
        np.array(
            [np.max(np.min(D_mat, axis=0)),
             np.max(np.min(D_mat, axis=1))]))
    #return(dH)
    if (dH > 0.0 and dH < 5.0):
        print("Similar")
    else:
        print("Not similar")

    print(dH)
Example #32
 def test_incontiguous_array(self):
     msg = "incontiguous memory layout of array"
     x = np.arange(64).reshape((2,2,2,2,2,2))
     a = x[:,0,:,0,:,0]
     b = x[:,1,:,1,:,1]
     a[0,0,0] = -1
     msg2 = "make sure it references to the original array"
     assert_equal(x[0,0,0,0,0,0], -1, err_msg=msg2)
     assert_array_equal(umt.inner1d(a,b), np.sum(a*b,axis=-1), err_msg=msg)
     x = np.arange(24).reshape(2,3,4)
     a = x.T
     b = x.T
     a[0,0,0] = -1
     assert_equal(x[0,0,0], -1, err_msg=msg2)
     assert_array_equal(umt.inner1d(a,b), np.sum(a*b,axis=-1), err_msg=msg)
Example #33
 def test_incontiguous_array(self):
     msg = "incontiguous memory layout of array"
     x = np.arange(64).reshape((2, 2, 2, 2, 2, 2))
     a = x[:, 0,:, 0,:, 0]
     b = x[:, 1,:, 1,:, 1]
     a[0, 0, 0] = -1
     msg2 = "make sure it references to the original array"
     assert_equal(x[0, 0, 0, 0, 0, 0], -1, err_msg=msg2)
     assert_array_equal(umt.inner1d(a, b), np.sum(a*b, axis=-1), err_msg=msg)
     x = np.arange(24).reshape(2, 3, 4)
     a = x.T
     b = x.T
     a[0, 0, 0] = -1
     assert_equal(x[0, 0, 0], -1, err_msg=msg2)
     assert_array_equal(umt.inner1d(a, b), np.sum(a*b, axis=-1), err_msg=msg)
Example #34
    def update(self, pred_batch: np.ndarray, target_batch: np.ndarray):
        """
        Input:
          - pred_batch.shape:   [B, H, W]
          - target_batch.shape: [B, H, W]
        """
        assert len(pred_batch) == len(target_batch)

        for i in range(len(pred_batch)):
            dice_score = []
            ahd_score = []

            for j in range(self.num_classes):
                pred_bool = (pred_batch[i] == j).flatten()
                target_bool = (target_batch[i] == j).flatten()
                if np.alltrue(
                        pred_bool == target_bool):  # avoid dice() returning NaN when the inputs are all zeros
                    dice_score.append(1.0)
                else:
                    dice_score.append(
                        1 -
                        dice(pred_bool,
                             target_bool))  # Dice score = 1 - Dice distance
                # compute AHD
                if j == 0:
                    ahd_score.append(-1.0)  # skip the background class: it would use a lot of memory and the score is not useful
                else:
                    pred_coord = np.array(np.where(pred_batch[i] == j)).T
                    target_coord = np.array(np.where(target_batch[i] == j)).T
                    if len(target_coord) == 0 and len(
                            pred_coord) == 0:  # 规避0长度数组
                        ahd_score.append(1.0)
                    elif len(target_coord) == 0 or len(pred_coord) == 0:
                        ahd_score.append(0.0)
                    else:
                        D_mat = np.sqrt(
                            inner1d(pred_coord, pred_coord)[np.newaxis].T +
                            inner1d(target_coord, target_coord) -
                            2 * np.dot(pred_coord, target_coord.T))
                        dH = np.max(
                            np.array([
                                np.max(np.min(D_mat, axis=0)),
                                np.max(np.min(D_mat, axis=1))
                            ]))
                        ahd_score.append(dH)

            self.dice_scores.append(tuple(dice_score))
            self.ahd_scores.append(tuple(ahd_score))
Example #35
def angle(A, B, C, degrees=True):
    """
    Returns the angle at *B* between *AB* and *BC*.

    Parameters
    ----------
    A, B, C : (*x*, *y*, *z*) triples or Nx3 arrays of triples
        Points on sphere.

    degrees : bool, optional
        If `True` (default) the result is returned in decimal degrees,
        otherwise radians.

    Returns
    -------
    angle : float or array of floats
        The angle at *B* between *AB* and *BC*, in range 0 to 2π.

    References
    ----------

    .. [1] Miller, Robert D.  Computing the area of a spherical
       polygon.  Graphics Gems IV.  1994.  Academic Press.
    """
    if HAS_C_UFUNCS:
        angle = math_util.angle(A, B, C)
    else:
        A = np.asanyarray(A)
        B = np.asanyarray(B)
        C = np.asanyarray(C)

        A, B, C = np.broadcast_arrays(A, B, C)

        ABX = _fast_cross(A, B)
        ABX = _cross_and_normalize(B, ABX)
        BCX = _fast_cross(C, B)
        BCX = _cross_and_normalize(B, BCX)
        X = _cross_and_normalize(ABX, BCX)
        diff = inner1d(B, X)
        inner = inner1d(ABX, BCX)
        with np.errstate(invalid='ignore'):
            angle = np.arccos(inner)
        angle = np.where(diff < 0.0, (2.0 * np.pi) - angle, angle)

    if degrees:
        angle = np.rad2deg(angle)

    return angle
Example #36
def angle(A, B, C, degrees=True):
    """
    Returns the angle at *B* between *AB* and *BC*.

    Parameters
    ----------
    A, B, C : (*x*, *y*, *z*) triples or Nx3 arrays of triples
        Points on sphere.

    degrees : bool, optional
        If `True` (default) the result is returned in decimal degrees,
        otherwise radians.

    Returns
    -------
    angle : float or array of floats
        The angle at *B* between *AB* and *BC*, in range 0 to 2π.

    References
    ----------

    .. [1] Miller, Robert D.  Computing the area of a spherical
       polygon.  Graphics Gems IV.  1994.  Academic Press.
    """
    if HAS_C_UFUNCS:
        angle = math_util.angle(A, B, C)
    else:
        A = np.asanyarray(A)
        B = np.asanyarray(B)
        C = np.asanyarray(C)

        A, B, C = np.broadcast_arrays(A, B, C)

        ABX = _fast_cross(A, B)
        ABX = _cross_and_normalize(B, ABX)
        BCX = _fast_cross(C, B)
        BCX = _cross_and_normalize(B, BCX)
        X = _cross_and_normalize(ABX, BCX)
        diff = inner1d(B, X)
        inner = inner1d(ABX, BCX)
        with np.errstate(invalid='ignore'):
            angle = np.arccos(inner)
        angle = np.where(diff < 0.0, (2.0 * np.pi) - angle, angle)

    if degrees:
        angle = np.rad2deg(angle)

    return angle
Example #37
def pdf_between_data(train_data, input_data, std):
    """ Compute PDF between two samples.

    Parameters
    ----------
    train_data : array
        train sample
    input_data : array
        input sample
    std : float
        standard deviation for PDF
    """
    # Note: This implementation works faster than 3D arrays
    # and use less memory.
    results = zeros((train_data.shape[0], input_data.shape[0]))
    variance = std ** 2
    function_const = std * sqrt(2 * pi)
    train_data_size = train_data.shape[0]

    for i, input_row in enumerate(input_data):
        inputs = tile(input_row, (train_data_size, 1))
        class_difference = (train_data - inputs)
        total_distance = inner1d(class_difference, class_difference)
        results[:, i] = exp(-total_distance / variance) / function_const

    return results
Example #38
    def _boost_real(self, iboost, X, y, sample_weight, X_argsorted=None):
        """Implement a single boost using the SAMME.R real algorithm."""
        estimator = self._make_estimator()

        if X_argsorted is not None:
            estimator.fit(X, y, sample_weight=sample_weight,
                          X_argsorted=X_argsorted)
        else:
            estimator.fit(X, y, sample_weight=sample_weight)

        y_predict_proba = estimator.predict_proba(X)

        if iboost == 0:
            self.classes_ = getattr(estimator, 'classes_', None)
            self.n_classes_ = len(self.classes_)

        y_predict = self.classes_.take(np.argmax(y_predict_proba, axis=1),
                                       axis=0)

        # Instances incorrectly classified
        incorrect = y_predict != y

        # Error fraction
        estimator_error = np.mean(
            np.average(incorrect, weights=sample_weight, axis=0))

        # Stop if classification is perfect
        if estimator_error <= 0:
            return sample_weight, 1., 0.

        # Construct y coding as described in Zhu et al [2]:
        #
        #    y_k = 1 if c == k else -1 / (K - 1)
        #
        # where K == n_classes_ and c, k in [0, K) are indices along the second
        # axis of the y coding with c being the index corresponding to the true
        # class label.
        n_classes = self.n_classes_
        classes = self.classes_
        y_codes = np.array([-1. / (n_classes - 1), 1.])
        y_coding = y_codes.take(classes == y[:, np.newaxis])

        # Displace zero probabilities so the log is defined.
        # Also fix negative elements which may occur with
        # negative sample weights.
        y_predict_proba[y_predict_proba <= 0] = 1e-5

        # Boost weight using multi-class AdaBoost SAMME.R alg
        estimator_weight = (-1. * self.learning_rate
                                * (((n_classes - 1.) / n_classes) *
                                   inner1d(y_coding, np.log(y_predict_proba))))

        # Only boost the weights if it will fit again
        if not iboost == self.n_estimators - 1:
            # Only boost positive weights
            sample_weight *= np.exp(estimator_weight *
                                    ((sample_weight > 0) |
                                     (estimator_weight < 0)))

        return sample_weight, 1., estimator_error
Example #39
 def SAFE(self, lam_max, lam, y, X):
     """
     Screen variables using the SAFE rule.
     """
     resid_prod = np.fabs( inner1d(X.T, y) )
     idx = resid_prod >= lam - la.norm(X, axis=0)*la.norm(y)*((lam_max-lam)/lam_max)
     return np.where(idx)[0]
Example #40
 def STRONG(self, lam_max, lam, resid, X):
     """
     Screen variables using the STRONG rule.
     """
     resid_prod = np.fabs( inner1d(X.T,resid) )
     idx = resid_prod >= 2*lam - lam_max
     return np.where(idx)[0]
Example #41
def pdf_between_data(train_data, input_data, std):
    """
    Compute PDF between two samples.

    Parameters
    ----------
    train_data : array
        Training dataset.

    input_data : array
        Input dataset

    std : float
        Standard deviation for Probability Density
        Function (PDF).

    Returns
    -------
    array-like
    """
    n_train_samples = train_data.shape[0]
    n_samples = input_data.shape[0]

    results = np.zeros((n_train_samples, n_samples))
    variance = std ** 2
    const = std * math.sqrt(2 * math.pi)

    for i, input_row in enumerate(input_data):
        inputs = np.tile(input_row, (n_train_samples, 1))
        class_difference = (train_data - inputs)
        total_distance = inner1d(class_difference, class_difference)
        results[:, i] = np.exp(-total_distance / variance) / const

    return results
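Example call for pdf_between_data (illustrative; assumes numpy as np, math, and an inner1d are in scope): the entry for a training row equal to the input row carries the largest density.

import numpy as np

train = np.array([[0.0, 0.0], [1.0, 1.0]])
inputs = np.array([[0.0, 0.0]])
out = pdf_between_data(train, inputs, std=1.0)
print(out.shape)  # (2, 1)
print(out)        # first row is the peak: zero distance to the input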
Example #42
 def _boost_real(self, iboost, X, y, sample_weight):
     estimator = self._make_estimator()
     try:
         estimator.set_params(random_state=self.random_state)
     except ValueError:
         pass
     estimator.fit(X, y, sample_weight=sample_weight)
     y_predict_proba = estimator.predict_proba(X)
     if iboost == 0:
         self.classes_ = getattr(estimator, 'classes_', None)
         self.n_classes_ = len(self.classes_)
     y_predict = self.classes_.take(np.argmax(y_predict_proba, axis=1), axis=0)
     # Instances incorrectly classified
     incorrect = y_predict != y
     # Error fraction
     estimator_error = np.mean(
         np.average(incorrect, weights=sample_weight, axis=0))
     # Stop if classification is perfect
     if estimator_error <= 0:
         return sample_weight, 1., 0.
     n_classes = self.n_classes_
     classes = self.classes_
     y_codes = np.array([-1. / (n_classes - 1), 1.])
     y_coding = y_codes.take(classes == y[:, np.newaxis])
     y_predict_proba[y_predict_proba <= 0] = 1e-5
     # Boost weight using multi-class AdaBoost SAMME.R alg
     estimator_weight = (-1. * self.learning_rate
                             * (((n_classes - 1.) / n_classes) *
                                inner1d(y_coding, np.log(y_predict_proba))))
     if not iboost == self.n_estimators - 1:
         # Only boost positive weights
         sample_weight *= np.exp(estimator_weight *
                                 ((sample_weight > 0) |
                                  (estimator_weight < 0)))
     return sample_weight, 1., estimator_error
Example #43
 def sphere_selection(self, origin = None, radius = 1.0):
     """get a spherical subset of stars
     
     Arguments:
     origin -- origin of the spherical region. default: [0, 0, 0]
     radius -- radius in code units of the spherical region
     
     Returns:
     Nbody6Subset containing the stars inside the requested sphere
     """
 
     if origin is not None and np.allclose(origin, self.dc_pos_new):
         rads = self.Radii_dcm_new
     elif origin is not None and np.allclose(origin, self.dc_pos):
         rads = self.Radii_dcm
     elif origin is None:
         rads = self.Radii
     else:
         posorigin = self.Pos - origin
         rads = np.array(np.sqrt(inner1d(posorigin, posorigin)))
     selection = (rads < radius)
     if selection.sum() == 0:
         print('No stars are in the sphere!')
         return None
     else:
         return Nbody6Subset(self, self.Names[selection])            
Example #44
def test_local_coordsys(geom):
    '''Ensure local coordinate systems are orthonormal'''
    rot = transforms3d.euler.euler2mat(*(np.pi * 2 * np.random.rand(3)))
    g = geom({'rotation': rot,
              'position': np.random.rand(3)})

    x, y, z = g.get_local_euklid_bases(np.random.rand(5, 2))

    assert np.allclose(inner1d(x, y), 0)
    assert np.allclose(inner1d(x, z), 0)
    assert np.allclose(inner1d(y, z), 0)

    for vec in [x, y, z]:
        assert np.allclose(np.linalg.norm(vec, axis=1), 1.)

    # Check it's a right-handed coordinate system
    assert np.allclose(np.cross(x[:, :3], y[:, :3]), z[:, :3])
Example #45
def cal_rms(matA, matB):
    L = matA.shape[0]
    if L < 1: return 0
    dx = matA - matB
    dx2 = inner1d(dx, dx)

    return np.sqrt(np.sum(dx2) / L)
Example #46
 def calc_new_density_center(self, axes = [0, 1, 2]): 
     """Calculate the density center of a set of stars
     
     Uses the method of Casertano & Hut ApJ 1985, 298, 80. 
     
     Arguments:
     axes -- use the full 3d information (default mode) or pass in two axes to 
             use the projection along the missing axis.
     """
     positions = self.Pos[:, axes]
     #  get a nearest neighbor tree  
     kdtree = spatial.cKDTree(positions)    
     # the first result is the point itself, so sixth neighbor is the seventh result
     near6 = kdtree.query(positions, 7)[0][:,6]
     vols = np.pi * near6**2
     densities = 5.0 / vols
 
     # density center is density weighted radius of the stars
     self.dc_pos_new = (densities[:,np.newaxis] * positions).sum(0) / densities.sum()    
     self.dc_vel_new = (densities[:,np.newaxis] * 
         self.Vel[:, axes]).sum(0) / densities.sum()
         
     # update radii from density center
     poscm = self.Pos - self.dc_pos_new
     self.Radii_dcm_new = np.array(np.sqrt(inner1d(poscm, poscm)))  
Example #47
    def approx_predictive_ll(self, Arow, Acol, Wrow, Wcol, M=100):
        """
        Approximate the (marginal) predictive probability by averaging over M
        samples of the predictive parameters
        """
        from scipy.stats import multivariate_normal
        from numpy.core.umath_tests import inner1d
        import scipy.linalg

        N, B = self.N, self.B
        assert Arow.shape == Acol.shape == (N + 1,)

        # Get the predictive parameters
        lps = np.zeros(M)
        for m in range(M):
            Murow, Mucol, Lrow, Lcol = self.sample_predictive_parameters()

            # for n in xrange(N+1):
            #     if Arow[n]:
            #         lps[m] += multivariate_normal(Murow[n], Sigrow[n]).pdf(Wrow[n])
            #
            #     if n < N and Acol[n]:
            #         lps[m] += multivariate_normal(Mucol[n], Sigcol[n]).pdf(Wcol[n])

            # Sigrow_chol = np.array([np.linalg.cholesky(S) for S in Sigrow])
            # Sigcol_chol = np.array([np.linalg.cholesky(S) for S in Sigcol])

            for n in range(N + 1):
                if Arow[n]:
                    L = Lrow[n]
                    x = Wrow[n] - Murow[n]
                    xs = scipy.linalg.solve_triangular(L, x.T, lower=True)
                    lps[m] += (
                        -1.0 / 2.0 * inner1d(xs.T, xs.T) - B / 2.0 * np.log(2 * np.pi) - np.log(L.diagonal()).sum()
                    )

                if n < N and Acol[n]:
                    L = Lcol[n]
                    x = Wcol[n] - Mucol[n]
                    xs = scipy.linalg.solve_triangular(L, x.T, lower=True)
                    lps[m] += (
                        -1.0 / 2.0 * inner1d(xs.T, xs.T) - B / 2.0 * np.log(2 * np.pi) - np.log(L.diagonal()).sum()
                    )

        # Compute average log probability
        lp = -np.log(M) + logsumexp(lps)
        return lp
Example #48
def get_lambda_max(X,y):
    """ 
    Find the value of lambda at which all coefficients are set to zero
    by finding the minimum value such that 0 is in the subdifferential
    and the coefficients are all zero.
    """
    subgrads = np.fabs( inner1d(X.T, y))
    return np.max( subgrads )
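Sanity check for get_lambda_max (illustrative): at lam >= lam_max every correlation |x_j' y| is at most lam, so the lasso solution is identically zero.

import numpy as np

rng = np.random.default_rng(1)
X = rng.standard_normal((50, 4))
y = rng.standard_normal(50)
lam_max = get_lambda_max(X, y)
assert np.all(np.fabs(X.T @ y) <= lam_max)  # X.T @ y equals inner1d(X.T, y)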
Example #49
def multivariate_t_loglik(y,nu,mu,lmbda):
    # returns the log value
    d = len(mu)
    yc = np.array(y-mu,ndmin=2)
    ys, LT = general.solve_chofactor_system(lmbda,yc.T,overwrite_b=True)
    return scipy.special.gammaln((nu+d)/2.) - scipy.special.gammaln(nu/2.) \
            - (d/2.)*np.log(nu*np.pi) - np.log(LT.diagonal()).sum() \
            - (nu+d)/2.*np.log1p(1./nu*inner1d(ys.T,ys.T))
Example #50
def multivariate_t_loglik(y,nu,mu,lmbda):
    # returns the log value
    d = len(mu)
    yc = np.array(y-mu,ndmin=2)
    L = np.linalg.cholesky(lmbda)
    ys = scipy.linalg.solve_triangular(L,yc.T,overwrite_b=True,lower=True)
    return scipy.special.gammaln((nu+d)/2.) - scipy.special.gammaln(nu/2.) \
            - (d/2.)*np.log(nu*np.pi) - np.log(L.diagonal()).sum() \
            - (nu+d)/2.*np.log1p(1./nu*inner1d(ys.T,ys.T))
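This parametrization (lmbda as the scale/shape matrix) can be cross-checked against SciPy's multivariate t distribution (available since SciPy 1.6); a hedged sketch, assuming inner1d and the imports used above are in scope:

import numpy as np
from scipy.stats import multivariate_t

mu = np.zeros(2)
lmbda = np.array([[2.0, 0.3], [0.3, 1.0]])
nu = 5.0
y = np.array([0.5, -1.0])
print(multivariate_t_loglik(y, nu, mu, lmbda))               # length-1 array
print(multivariate_t(loc=mu, shape=lmbda, df=nu).logpdf(y))  # matching scalar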
Example #51
def angleBetween(v1, v2):
    """ Return the angles in radians between two unit vector arrays.
        Angles are in [0,pi]. """
    # When vectors are equal their dot product is 1, but
    # due to rounding it can be slightly above 1 which would then
    # result in arccos returning NaN. Therefore we clip the values to [-1,1]
    dot = np.clip(inner1d(v1, v2), -1, 1)
    angle = np.arccos(dot)
    return angle
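Example usage of angleBetween on unit vectors (illustrative):

import numpy as np

v1 = np.array([[1.0, 0.0, 0.0], [0.0, 1.0, 0.0]])
v2 = np.array([[1.0, 0.0, 0.0], [0.0, -1.0, 0.0]])
print(angleBetween(v1, v2))  # -> [0. 3.14159265], i.e. 0 and pi radians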
Example #52
    def intersect(self, dir, pos):
        '''Calculate the intersection point between a ray and the element

        Parameters
        ----------
        dir : `numpy.ndarray` of shape (N, 4)
            homogeneous coordinates of the direction of the ray
        pos : `numpy.ndarray` of shape (N, 4)
            homogeneous coordinates of a point on the ray

        Returns
        -------
        intersect :  boolean array of length N
            ``True`` if an intersection point is found.
        interpos : `numpy.ndarray` of shape (N, 4)
            homogeneous coordinates of the intersection point. Values are set
            to ``np.nan`` if no intersection point is found.
        interpos_local : `numpy.ndarray` of shape (N, 2)
            y and z coordinates in the coordinate system of the active plane.
        '''
        p_rays = pluecker.dir_point2line(h2e(dir), h2e(pos))
        radius = np.linalg.norm(self.geometry['v_y'])
        height = np.linalg.norm(self.geometry['v_x'])
        intersect = np.zeros(pos.shape[0], dtype=bool)

        # ray passes through cylinder caps?
        for fac in [-1, 1]:
            cap_midpoint = self.geometry['center'] + fac * self.geometry['v_x']
            cap_plane = pluecker.point_dir2plane(cap_midpoint,
                                                 self.geometry['e_x'])
            interpos = pluecker.intersect_line_plane(p_rays, cap_plane)
            r = np.linalg.norm(h2e(cap_midpoint) - h2e(interpos), axis=-1)
            intersect[r < radius]  = True

        # Ray passes through the side of a cylinder
        # Note that we don't worry about rays parallel to x because those are
        # tested by passing through the caps already
        n = norm_vector(np.cross(h2e(self.geometry['e_x']), h2e(dir)))
        d = np.abs(inner1d(n, h2e(self.geometry['center']) - h2e(pos)))
        n2 = norm_vector(np.cross(h2e(dir), n))
        k = inner1d(h2e(pos) - h2e(self.geometry['center']), n2) / inner1d(h2e(self.geometry['e_x']), n2)
        intersect[(d < radius) & (np.abs(k) < height)] = True

        return intersect, None, None
Example #53
    def order_sign_convention(self, p, e_perp_groove):
        '''Convention to choose the sign for CAT grating orders

        Blazing happens on the side of the negative orders. Obviously, this
        convention is only meaningful if the photons do not arrive perpendicular to the grating.
        '''
        dotproduct = inner1d(p, e_perp_groove)
        sign = np.sign(dotproduct)
        sign[sign == 0] = 1
        return sign
Example #54
    def expected_log_likelihood(self,x):
        mu_n, sigma_n, kappa_n, nu_n = self._mu_mf, self._sigma_mf, self._kappa_mf, self._nu_mf
        D = self.D
        x = np.reshape(x,(-1,D)) - mu_n # x is now centered
        chol = self._get_sigma_mf_chol()
        xs = util.general.solve_triangular(chol,x.T,overwrite_b=True)

        # see Eqs. 10.64, 10.67, and 10.71 in Bishop
        return self._loglmbdatilde()/2 - D/(2*kappa_n) - nu_n/2 * \
                inner1d(xs.T,xs.T) - D/2*np.log(2*np.pi)
Example #55
    def test_endian(self):
        msg = "big endian"
        a = np.arange(6, dtype='>i4').reshape((2,3))
        assert_array_equal(umt.inner1d(a,a), np.sum(a*a,axis=-1), err_msg=msg)
        msg = "little endian"
        a = np.arange(6, dtype='<i4').reshape((2,3))
        assert_array_equal(umt.inner1d(a,a), np.sum(a*a,axis=-1), err_msg=msg)

        # Output should always be native-endian
        Ba = np.arange(1, dtype='>f8')
        La = np.arange(1, dtype='<f8')
        assert_equal((Ba+Ba).dtype, np.dtype('f8'))
        assert_equal((Ba+La).dtype, np.dtype('f8'))
        assert_equal((La+Ba).dtype, np.dtype('f8'))
        assert_equal((La+La).dtype, np.dtype('f8'))

        assert_equal(np.absolute(La).dtype, np.dtype('f8'))
        assert_equal(np.absolute(Ba).dtype, np.dtype('f8'))
        assert_equal(np.negative(La).dtype, np.dtype('f8'))
        assert_equal(np.negative(Ba).dtype, np.dtype('f8'))
Example #56
def source_selection_focused(ns, x0, xs):
    """Secondary source selection for a focused source.

    Eq.(2.78) from :cite:`Wierstorf2014`

    """
    x0 = util.asarray_of_rows(x0)
    xs = util.asarray_1d(xs)
    ns = util.normalize_vector(ns)
    ds = xs - x0
    return inner1d(ns, ds) >= defs.selection_tolerance
Example #57
def memory_efficient_inner1d(fst_arr, fst_indices, snd_arr, snd_indices):
    _, T = fst_arr.shape
    size = len(fst_indices)
    result = np.zeros(size)
    batch_size = max(1, MAX_INNER1D_ELEMENTS // T)  # integer batch size, required for slicing
    start = 0
    while start < size:
        finish = min(start + batch_size, size)
        result[start:finish] = inner1d(fst_arr[fst_indices[start:finish], :], snd_arr[snd_indices[start:finish], :])
        start = finish
    return result
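Usage sketch for memory_efficient_inner1d; MAX_INNER1D_ELEMENTS is a module-level constant in the original, and its value here is illustrative:

import numpy as np

MAX_INNER1D_ELEMENTS = 10_000  # cap on elements processed per batch
arr = np.random.rand(1000, 64)
idx = np.arange(1000)
out = memory_efficient_inner1d(arr, idx, arr, idx)
assert np.allclose(out, np.sum(arr * arr, axis=-1))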
Example #58
def source_selection_point(n0, x0, xs):
    """Secondary source selection for a point source.

    Eq.(15) from :cite:`Spors2008`

    """
    n0 = util.asarray_of_rows(n0)
    x0 = util.asarray_of_rows(x0)
    xs = util.asarray_1d(xs)
    ds = x0 - xs
    return inner1d(ds, n0) >= defs.selection_tolerance
Example #59
 def test_broadcast(self):
     msg = "broadcast"
     a = np.arange(4).reshape((2, 1, 2))
     b = np.arange(4).reshape((1, 2, 2))
     assert_array_equal(umt.inner1d(a, b), np.sum(a*b, axis=-1), err_msg=msg)
     msg = "extend & broadcast loop dimensions"
     b = np.arange(4).reshape((2, 2))
     assert_array_equal(umt.inner1d(a, b), np.sum(a*b, axis=-1), err_msg=msg)
     # Broadcast in core dimensions should fail
     a = np.arange(8).reshape((4, 2))
     b = np.arange(4).reshape((4, 1))
     assert_raises(ValueError, umt.inner1d, a, b)
     # Extend core dimensions should fail
     a = np.arange(8).reshape((4, 2))
     b = np.array(7)
     assert_raises(ValueError, umt.inner1d, a, b)
     # Broadcast should fail
     a = np.arange(2).reshape((2, 1, 1))
     b = np.arange(3).reshape((3, 1, 1))
     assert_raises(ValueError, umt.inner1d, a, b)
Example #60
 def backward(self, bottom, top, propagate_down):
     """Computes the backward pass."""
     if not propagate_down:
         return 0.
     top_diff = top[0].diff()
     prob = top[0].data()
     bottom_diff = bottom[0].init_diff(setzero=False)
     bottom_diff[:] = top_diff
     cross_term = inner1d(top_diff, prob)
     bottom_diff -= cross_term[:, np.newaxis]
     bottom_diff *= prob
     return 0.