def denoise(im, U_init, tolerance=0.1, tau=0.125, tv_weight=100):
    """ An implementation of the Rudin-Osher-Fatemi (ROF) denoising model
    using the numerical procedure presented in eq. (11) of A. Chambolle (2005).
    Input: noisy input image (grayscale), initial guess for U, weight of
    the TV-regularizing term, steplength, tolerance for stop criterion.
    Output: denoised and detextured image, texture residual. """
    m, n = np.shape(im)  # size of noisy image
    # initialize
    U = U_init
    Px = im  # x-component of the dual field
    Py = im  # y-component of the dual field
    error = 1
    while error > tolerance:
        Uold = U
        # gradient of primal variable
        GradUx = np.roll(U, -1, axis=1) - U  # x-component of U’s gradient
        GradUy = np.roll(U, -1, axis=0) - U  # y-component of U’s gradient
        # update the dual variable
        PxNew = Px + (tau / tv_weight) * GradUx
        PyNew = Py + (tau / tv_weight) * GradUy
        NormNew = np.maximum(1, np.sqrt(PxNew ** 2 + PyNew ** 2))
        Px = PxNew / NormNew  # update of x-component (dual)
        Py = PyNew / NormNew  # update of y-component (dual)
        # update the primal variable
        RxPx = np.roll(Px, 1, axis=1)  # right x-translation of x-component
        RyPy = np.roll(Py, 1, axis=0)  # right y-translation of y-component
        DivP = (Px - RxPx) + (Py - RyPy)  # divergence of the dual field.
        U = im + tv_weight * DivP  # update of the primal variable
        # update of error
        error = np.linalg.norm(U - Uold) / np.sqrt(n * m)
    return U, im - U  # denoised image and texture residual
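A minimal usage sketch for the ROF routine above (assumptions: numpy is available, and the synthetic noisy image below merely stands in for real data):

import numpy as np

rng = np.random.default_rng(0)
clean = np.zeros((128, 128))
clean[32:96, 32:96] = 1.0                          # a simple synthetic "image"
noisy = clean + 0.2 * rng.standard_normal(clean.shape)

U, T = denoise(noisy, noisy)                       # the noisy image doubles as the initial guess
print(U.shape, T.shape)                            # denoised image and texture residual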
def correlateTimeseries(A, B):
    """Correlate the 'price' columns of two DataFrames A and B that each
    have a 'date' column, after aligning B's samples to A's time base."""

    # Convert the time series to relative time
    aDate = A["date"] - A["date"].iat[0]
    bDate = B["date"] - B["date"].iat[0]

    # Prepare indices for matched data points
    datesMatched = np.searchsorted(aDate, bDate)
    l = len(aDate) - 1
    datesMatched[datesMatched > l] = l
    keyword = "price"
    # Select data according to matched indices
    a = np.array(A[keyword].values)
    aReduced = a[datesMatched]
    bReduced = np.array(B[keyword].values)
    # Correct to the baseline
    aReduced = aReduced - np.mean(aReduced)
    bReduced = bReduced - np.mean(bReduced)
    # Perform the z-transformation
    zA = aReduced / np.sqrt(np.sum(np.square(aReduced)) / l)
    zB = bReduced / np.sqrt(np.sum(np.square(bReduced)) / l)
    # Calculate the Pearson correlation; pearsonr returns (r, p-value)
    r, p_value = pearsonr(zA, zB)
    return r
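A hedged usage sketch for correlateTimeseries; the two DataFrames below are synthetic and only illustrate the expected 'date'/'price' layout (pandas, numpy and scipy.stats.pearsonr are assumed to be importable):

import numpy as np
import pandas as pd

A = pd.DataFrame({"date": pd.date_range("2020-01-01", periods=50, freq="D"),
                  "price": np.linspace(10.0, 20.0, 50)})
B = pd.DataFrame({"date": pd.date_range("2020-01-03", periods=40, freq="2D"),
                  "price": np.linspace(30.0, 25.0, 40)})

print(correlateTimeseries(A, B))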
Example #3
def lineIntegralZ3D(p1, p2, n):
    """Line integral along the edge from p1 to p2, following Holstein (1999);
    the numbers in the comments refer to equations in that paper."""

    t = (p2 - p1) / (p2 - p1).abs()
    v = p1.dot(n)

    h = p1.dot(t.cross(n))

    #    r0 = np.sqrt(v**2 + h**2)
    l1 = p1.dot(t)
    l2 = p2.dot(t)

    r1 = np.sqrt(v ** 2 + h ** 2 + l1 ** 2)
    r2 = np.sqrt(v ** 2 + h ** 2 + l2 ** 2)

    L = l2 - l1  # edge length

    # vertex method
    ch = h * np.sign(l1 + l2) * np.log((r2 + abs(l2)) / (r1 + abs(l1)))  # (51)
    atanLambda = np.arctan2(2.0 * h * L, (r1 + r2 + L) * (r1 + r2 - L) + 2.0 * abs(v) * (r2 + r1))  # (39)

    #     line method
    #    A = abs( L ) / ( r2 + r1 ) # (46)
    #    S = 0.5 * ( r1 + r2 - abs( L ) * A ) # (47)
    #
    #    atanLambda = np.arctan2(  ( h * A ), ( abs(v) * S ) ) #(48)
    #    ch = 2. * h * arctanh( L ) # (55)

    return ch - 2.0 * abs(v) * atanLambda  # (50)
    def rand(self):

        m, n = self.__m, self.__n

        s = linalg.cholesky(self.__prod).transpose()
        w = self.__weight

        # Compute the parameters of the posterior distribution.
        mu = linalg.solve(s[:m, :m], s[:m, m:])
        omega = np.dot(s[:m, :m].transpose(), s[:m, :m])
        sigma = np.dot(s[m:, m:].transpose(), s[m:, m:]) / w
        eta = w

        # Simulate the marginal Wishart distribution.
        f = linalg.solve(
            np.diag(np.sqrt(2.0 * random.gamma((eta - np.arange(n)) / 2.0))) + np.tril(random.randn(n, n), -1),
            np.sqrt(eta) * linalg.cholesky(sigma).transpose(),
        )
        b = np.dot(f.transpose(), f)

        # Simulate the conditional Gauss distribution.
        a = mu + linalg.solve(
            linalg.cholesky(omega).transpose(), np.dot(random.randn(m, n), linalg.cholesky(b).transpose())
        )

        return a, b
Example #5
def perform_scaling(data, electrode_indices):
    """This function, suprisingly, performs scaling on a given set of data.

    The data is returned as a dictionary of 8 differently scaled data sets. The
    8 data sets represents all combinations of the following scaling
    methodologies:
        'Vector' of 'MinMax' scaling
        'Within'- or 'Across'-subject scaling
        'All' or 'Few' electrodes included in scaling

    The data can be accessed in the following way:
    
    data_scaled = perform_scaling(data, electrode_indices)
    print(data_scaled['Vector']['Across']['All']
    
    """
    # TODO: Should the scaled for selected electrodes data retain all the
    # unscaled values? I.e. for compatibility with other scripts - i.e. the
    # electrode numbers will be all screwed up...
    data_scaled = {
        "Vector": {"Across": {"All": None, "Few": None}, "Within": {"All": None, "Few": None}},
        "MaxMin": {"Across": {"All": None, "Few": None}, "Within": {"All": None, "Few": None}},
    }

    mean_all = mean(data, 0)
    mean_few = mean(data[:, electrode_indices], 0)

    data_scaled["Vector"]["Across"]["All"] = copy(data) / sqrt(vdot(mean_all, mean_all))
    data_scaled["Vector"]["Across"]["Few"] = copy(data) / sqrt(vdot(mean_few, mean_few))

    min_point = min(mean_all)
    max_point = max(mean_all)
    diff = max_point - min_point
    data_scaled["MaxMin"]["Across"]["All"] = (copy(data) - min_point) / diff
    min_point = min(mean_few)
    max_point = max(mean_few)
    diff = max_point - min_point
    data_scaled["MaxMin"]["Across"]["Few"] = (copy(data) - min_point) / diff

    data_scaled["Vector"]["Within"]["All"] = zeros(data.shape)
    data_scaled["Vector"]["Within"]["Few"] = zeros(data.shape)
    data_scaled["MaxMin"]["Within"]["All"] = zeros(data.shape)
    data_scaled["MaxMin"]["Within"]["Few"] = zeros(data.shape)
    for i in range(data.shape[0]):
        data_scaled["Vector"]["Within"]["All"][i, :] = copy(data[i, :]) / sqrt(vdot(data[i, :], data[i, :]))
        data_scaled["Vector"]["Within"]["Few"][i, :] = copy(data[i, :]) / sqrt(
            vdot(data[i, electrode_indices], data[i, electrode_indices])
        )

        min_point = min(data[i, :])
        max_point = max(data[i, :])
        diff = max_point - min_point
        data_scaled["MaxMin"]["Within"]["All"][i, :] = (copy(data[i, :]) - min_point) / diff

        min_point = min(data[i, electrode_indices])
        max_point = max(data[i, electrode_indices])
        diff = max_point - min_point
        data_scaled["MaxMin"]["Within"]["Few"][i, :] = (copy(data[i, :]) - min_point) / diff

    return data_scaled
Example #6
def correlation_matrix_quadrature(a1, a2, rho=None):
    """
    Calculate the quadrature correlation matrix with given field operators
    :math:`a_1` and :math:`a_2`. If a density matrix is given the expectation
    values are calculated, otherwise a matrix with operators is returned.

    Parameters
    ----------

    a1 : :class:`qutip.qobj.Qobj`
        Field operator for mode 1.

    a2 : :class:`qutip.qobj.Qobj`
        Field operator for mode 2.

    rho : :class:`qutip.qobj.Qobj`
        Density matrix for which to calculate the covariance matrix.

    Returns
    -------

    corr_mat: *array* of complex numbers or :class:`qutip.qobj.Qobj`
        A 2-dimensional *array* of covariance values for the field quadratures,
        or, if rho is None, a matrix of operators.

    """
    x1 = (a1 + a1.dag()) / np.sqrt(2)
    p1 = -1j * (a1 - a1.dag()) / np.sqrt(2)
    x2 = (a2 + a2.dag()) / np.sqrt(2)
    p2 = -1j * (a2 - a2.dag()) / np.sqrt(2)

    basis = [x1, p1, x2, p2]

    return correlation_matrix(basis, rho)
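A small usage sketch with QuTiP operators; it assumes a QuTiP installation and that correlation_matrix (called above) is in scope, as it is in QuTiP's continuous-variables module:

import numpy as np
from qutip import coherent_dm, destroy, qeye, tensor

N = 10
a1 = tensor(destroy(N), qeye(N))     # annihilation operator of mode 1
a2 = tensor(qeye(N), destroy(N))     # annihilation operator of mode 2
rho = tensor(coherent_dm(N, 0.8), coherent_dm(N, 0.5j))

corr = correlation_matrix_quadrature(a1, a2, rho)
print(np.round(corr.real, 3))        # 4x4 matrix of quadrature correlations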
Example #7
def four_point_transform(image, pts):  # original_points):
    """Take a rotate patch from an image and straightens it. 
    """
    # obtain a consistent order of the points and unpack them
    # individually
    # pts = np.copy(original_points)
    rect = order_points(pts)
    (tl, tr, br, bl) = rect
    # compute the width of the new image, which will be the
    # maximum distance between bottom-right and bottom-left
    # x-coordinates or the top-right and top-left x-coordinates
    widthA = np.sqrt(((br[0] - bl[0]) ** 2) + ((br[1] - bl[1]) ** 2))
    widthB = np.sqrt(((tr[0] - tl[0]) ** 2) + ((tr[1] - tl[1]) ** 2))
    maxWidth = max(int(widthA), int(widthB))

    # compute the height of the new image, which will be the
    # maximum distance between the top-right and bottom-right
    # y-coordinates or the top-left and bottom-left y-coordinates
    heightA = np.sqrt(((tr[0] - br[0]) ** 2) + ((tr[1] - br[1]) ** 2))
    heightB = np.sqrt(((tl[0] - bl[0]) ** 2) + ((tl[1] - bl[1]) ** 2))
    maxHeight = max(int(heightA), int(heightB))

    # now that we have the dimensions of the new image, construct
    # the set of destination points to obtain a "birds eye view",
    # (i.e. top-down view) of the image, again specifying points
    # in the top-left, top-right, bottom-right, and bottom-left
    # order
    dst = np.array([[0, 0], [maxWidth - 1, 0], [maxWidth - 1, maxHeight - 1], [0, maxHeight - 1]], dtype="float32")

    # compute the perspective transform matrix and then apply it
    M = cv2.getPerspectiveTransform(rect, dst)
    warped = cv2.warpPerspective(image, M, (maxWidth, maxHeight))

    # return the warped image
    return warped
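A hypothetical usage sketch; it assumes cv2 and numpy are imported, that order_points() used above is defined, and that the file name below exists on disk:

import cv2
import numpy as np

image = cv2.imread("card.png")
# the four corners of the patch to straighten (x, y), in any order
pts = np.array([[73, 239], [356, 117], [475, 265], [187, 443]], dtype="float32")

warped = four_point_transform(image, pts)
cv2.imwrite("card_straightened.png", warped)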
def align_magnetism(m, vectors):
    """ Rotates a matrix, to align its components with the direction
  of the magnetism """
    if not len(m) == 2 * len(vectors):  # stop if they don't have
        # compatible dimensions
        raise
    # pauli matrices
    from scipy.sparse import csc_matrix, bmat

    sx = csc_matrix([[0.0, 1.0], [1.0, 0.0]])
    sy = csc_matrix([[0.0, -1j], [1j, 0.0]])
    sz = csc_matrix([[1.0, 0.0], [0.0, -1.0]])
    n = len(m) // 2  # number of sites (integer division)
    R = [[None for i in range(n)] for j in range(n)]  # rotation matrix
    from scipy.linalg import expm  # matrix exponential

    for (i, v) in zip(range(n), vectors):  # loop over sites
        vv = np.sqrt(v.dot(v))  # norm of v
        if vv > 0.000001:  # if nonzero scale
            u = v / vv
        else:  # if zero put to zero
            u = np.array([0.0, 0.0, 0.0])
        #    rot = u[0]*sx + u[1]*sy + u[2]*sz
        uxy = np.sqrt(u[0] ** 2 + u[1] ** 2)  # component in xy plane
        phi = np.arctan2(u[1], u[0])
        theta = np.arctan2(uxy, u[2])
        r1 = phi * sz / 2.0  # rotate along z
        r2 = theta * sy / 2.0  # rotate along y
        # a factor 2 is taken out due to 1/2 of S
        rot = expm(1j * r2) * expm(1j * r1)
        R[i][i] = rot  # save term
    R = bmat(R)  # convert to full sparse matrix
    mout = R * csc_matrix(m) * R.H  # rotate matrix
    return mout.todense()  # return dense matrix
def svdUpdate(U, S, V, a, b):
    """
    Update SVD of an (m x n) matrix `X = U * S * V^T` so that
    `[X + a * b^T] = U' * S' * V'^T`
    and return `U'`, `S'`, `V'`.
    
    `a` and `b` are (m, 1) and (n, 1) rank-1 matrices, so that svdUpdate can simulate 
    incremental addition of one new document and/or term to an already existing 
    decomposition.
    """
    rank = U.shape[1]
    m = U.T * a
    p = a - U * m
    Ra = numpy.sqrt(p.T * p)
    assert float(Ra) > 1e-10
    P = (1.0 / float(Ra)) * p
    n = V.T * b
    q = b - V * n
    Rb = numpy.sqrt(q.T * q)
    assert float(Rb) > 1e-10
    Q = (1.0 / float(Rb)) * q

    K = numpy.matrix(numpy.diag(list(numpy.diag(S)) + [0.0])) + numpy.bmat("m ; Ra") * numpy.bmat(" n; Rb").T
    u, s, vt = numpy.linalg.svd(K, full_matrices=False)
    tUp = numpy.matrix(u[:, :rank])
    tVp = numpy.matrix(vt.T[:, :rank])
    tSp = numpy.matrix(numpy.diag(s[:rank]))
    Up = numpy.bmat("U P") * tUp
    Vp = numpy.bmat("V Q") * tVp
    Sp = tSp
    return Up, Sp, Vp
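A hedged sanity check for svdUpdate: starting from an exact rank-k thin SVD, the incremental result should match the best rank-k approximation of the explicitly updated matrix. numpy.matrix is used only because the function above relies on matrix semantics:

import numpy

numpy.random.seed(0)
m, n, k = 8, 6, 3
X = numpy.matrix(numpy.random.rand(m, k)) * numpy.matrix(numpy.random.rand(k, n))  # exactly rank k
u, s, vt = numpy.linalg.svd(X, full_matrices=False)
U, S, V = numpy.matrix(u[:, :k]), numpy.matrix(numpy.diag(s[:k])), numpy.matrix(vt[:k, :].T)

a = numpy.matrix(numpy.random.rand(m, 1))
b = numpy.matrix(numpy.random.rand(n, 1))
Up, Sp, Vp = svdUpdate(U, S, V, a, b)

# reference: best rank-k approximation of the explicitly updated matrix
u2, s2, vt2 = numpy.linalg.svd(X + a * b.T, full_matrices=False)
ref = numpy.matrix(u2[:, :k]) * numpy.matrix(numpy.diag(s2[:k])) * numpy.matrix(vt2[:k, :])
print(numpy.allclose(Up * Sp * Vp.T, ref))  # expected: True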
    def __init__(self, d, cat, vocab, alpha, words_vectors, lambdaW, lambdaCat, lambdaL, iter):
        # initialise parameters with a uniform distribution on [-r, r],
        # where r is a small number
        r = np.sqrt(6) / np.sqrt(2 * d + 1)
        # W1 and W2 are d x d matrices,
        # W1 for learning, W2 for reconstruction
        # the 2d + 1 in the fan-in accounts for the bias
        self.W1 = np.random.rand(d, d) * 2 * r - r
        self.W2 = np.random.rand(d, d) * 2 * r - r
        # self.b1=np.random.rand(d,1)*2*r-r
        self.b1 = np.zeros([d, 1])
        self.W3 = np.random.rand(d, d) * 2 * r - r
        self.W4 = np.random.rand(d, d) * 2 * r - r
        self.b2 = np.zeros([d, 1])
        self.b3 = np.zeros([d, 1])
        # self.b2=np.random.rand(d,1)*2*r-r
        # self.b3=np.random.rand(d,1)*2*r-r
        # Wlab for learning sentiment labels
        self.Wlab = np.random.rand(cat, d) * 2 * r - r
        self.blab = np.zeros([cat, 1])
        # self.blab=np.random.rand(cat,1)*2*r-r
        # Wrep for learning the word vector representation
        self.WL = (np.random.rand(d, vocab) * 2 * r - r) * 10 ** (-3)

        self.alpha = alpha
        self.cat = cat
        self.d = d
        self.vocab = vocab
        self.words_vectors = words_vectors
        self.lambdaW = lambdaW
        self.lambdaCat = lambdaCat
        self.lambdaL = lambdaL
        self.postClassifier = LogisticRegression(penalty="l2", multi_class="multinomial", C=10 ** 6, solver="lbfgs")
        self.iter = iter
Example #11
    def ResMatS(self, H, K, L, W, EXP):
        # [len,H,K,L,W,EXP]=CleanArgs(H,K,L,W,EXP);
        x = self.x
        y = self.y
        z = self.z
        Q = self.modvec(H, K, L, "latticestar")
        uq = N.zeros((3, self.npts), "d")
        uq[0, :] = H / Q  # unit vector along Q
        uq[1, :] = K / Q
        uq[2, :] = L / Q
        xq = self.scalar(x[0, :], x[1, :], x[2, :], uq[0, :], uq[1, :], uq[2, :], "latticestar")
        yq = self.scalar(y[0, :], y[1, :], y[2, :], uq[0, :], uq[1, :], uq[2, :], "latticestar")
        zq = 0
        # scattering vector assumed to be in the (self.orient1, self.orient2) plane
        tmat = N.zeros((4, 4, self.npts))  # coordinate transformation matrix
        tmat[3, 3, :] = 1
        tmat[2, 2, :] = 1
        tmat[0, 0, :] = xq
        tmat[0, 1, :] = yq
        tmat[1, 1, :] = xq
        tmat[1, 0, :] = -yq

        RMS = N.zeros((4, 4, self.npts))
        rot = N.zeros((3, 3))
        EXProt = EXP

        # sample shape matrix in the coordinate system defined by the scattering vector
        for i in range(self.npts):
            sample = EXP[i]["sample"]
            if "shape" in sample:
                rot[0, 0] = tmat[0, 0, i]
                rot[1, 0] = tmat[1, 0, i]
                rot[0, 1] = tmat[0, 1, i]
                rot[1, 1] = tmat[1, 1, i]
                rot[2, 2] = tmat[2, 2, i]
                EXProt[i]["sample"]["shape"] = N.dot(rot, N.dot(sample["shape"], rot.T))

        R0, RM = self.ResMat(Q, W, EXProt)

        for i in range(self.npts):
            RMS[:, :, i] = N.dot((tmat[:, :, i]).transpose(), N.dot(RM[:, :, i], tmat[:, :, i]))

        mul = N.zeros((4, 4))
        e = N.eye(4, 4)
        for i in range(self.npts):
            if "Smooth" in EXP[i]:
                if "X" in (EXP[i]["Smooth"]):
                    mul[0, 0] = 1.0 / (EXP[i]["Smooth"]["X"] ** 2 / 8 / N.log(2))
                    mul[1, 1] = 1.0 / (EXP[i]["Smooth"]["Y"] ** 2 / 8 / N.log(2))
                    mul[2, 2] = 1.0 / (EXP[i]["Smooth"]["E"] ** 2 / 8 / N.log(2))
                    mul[3, 3] = 1.0 / (EXP[i]["Smooth"]["Z"] ** 2 / 8 / N.log(2))
                    R0[i] = (
                        R0[i]
                        / N.sqrt(N.linalg.det(e / RMS[:, :, i]))
                        * N.sqrt(N.linalg.det(e / mul + e / RMS[:, :, i]))
                    )
                    RMS[:, :, i] = e / (e / mul + e / RMS[:, :, i])
        return R0, RMS
Example #12
def fir_root_raised_cosine(num_taps, sample_rate, beta, symbol_period):
    h = []

    assert (num_taps % 2) == 1, "Number of taps must be odd."

    for i in range(num_taps):
        t = (i - (num_taps - 1) / 2) / sample_rate

        if t == 0:
            h.append((1 / (numpy.sqrt(symbol_period))) * (1 - beta + 4 * beta / numpy.pi))
        elif numpy.isclose(t, -symbol_period / (4 * beta)) or numpy.isclose(t, symbol_period / (4 * beta)):
            h.append(
                (beta / numpy.sqrt(2 * symbol_period))
                * (
                    (1 + 2 / numpy.pi) * numpy.sin(numpy.pi / (4 * beta))
                    + (1 - 2 / numpy.pi) * numpy.cos(numpy.pi / (4 * beta))
                )
            )
        else:
            num = numpy.cos((1 + beta) * numpy.pi * t / symbol_period) + numpy.sin(
                (1 - beta) * numpy.pi * t / symbol_period
            ) / (4 * beta * t / symbol_period)
            denom = 1 - (4 * beta * t / symbol_period) * (4 * beta * t / symbol_period)
            h.append(((4 * beta) / (numpy.pi * numpy.sqrt(symbol_period))) * num / denom)

    h = numpy.array(h) / numpy.sum(h)

    return h.astype(numpy.float32)
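A hypothetical usage sketch: design a 129-tap root-raised-cosine filter for a 1 MHz sample rate, 25% excess bandwidth and a 10 microsecond symbol period:

import numpy

taps = fir_root_raised_cosine(num_taps=129, sample_rate=1e6, beta=0.25, symbol_period=10e-6)
print(taps.shape, taps.dtype)            # (129,) float32
print(numpy.isclose(taps.sum(), 1.0))    # the taps are normalized to unit sum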
Example #13
    def _calc_cost(self, t0_strms, t1_strms):
        x0 = t0_strms["xLocs"]
        x1 = t1_strms["xLocs"]

        y0 = t0_strms["yLocs"]
        y1 = t1_strms["yLocs"]

        dp = np.hypot(x0[:, np.newaxis] - x1[np.newaxis, :], y0[:, np.newaxis] - y1[np.newaxis, :])
        # NOTE: Mike Dixon's implementation has storm volume data, while
        #       ZigZag only has storm area.  Therefore, we will do a sqrt
        #       instead of a cubed-root.
        dv = np.abs(np.sqrt(t0_strms["sizes"][:, np.newaxis]) - np.sqrt(t1_strms["sizes"][np.newaxis, :]))

        C = (self.distWeight * dp) + (self.volWeight * dv)

        # Make sure the highcost is gonna be high enough
        # The total cost is going to be the sum of costs over
        # the assignments.  Since there will be at most min(C.shape)
        # assignments, then multiplying the number of possible assignments
        # by the maximum assignment cost should guarantee a high enough cost.
        # Multiplying by 10 is just for extra measure.
        self._highCost = (10 * (min(C.shape) * C.max())) if C.size > 0 else TITAN._fallback_cost

        # Just double-checking that _highCost will pass the reject_assoc() later.
        if not self._reject_assoc(self._highCost):
            # TODO: Need better safety here... I would likely prefer
            #  np.inf, maybe.
            # New NOTE: Because that wasn't good enough to satisfy
            #  _reject_assoc(), go back to the fall-back of 999999.
            self._highCost = TITAN._fallback_cost

        # For any possible association where the points are too far
        # apart, then return a very large value.  Otherwise, return
        # the calculated cost for that association.
        return np.where(self._reject_assoc(dp), self._highCost, C)
Example #14
def _ica_def(X, tol, g, gprime, fun_args, max_iter, w_init):
    """Deflationary FastICA using fun approx to neg-entropy function

    Used internally by FastICA.
    """

    n_components = w_init.shape[0]
    W = np.zeros((n_components, n_components), dtype=float)

    # j is the index of the extracted component
    for j in range(n_components):
        w = w_init[j, :].copy()
        w /= np.sqrt((w ** 2).sum())

        n_iterations = 0
        # we set lim to tol+1 to be sure to enter at least once in next while
        lim = tol + 1
        while (lim > tol) & (n_iterations < (max_iter - 1)):
            wtx = np.dot(w.T, X)
            gwtx = g(wtx, fun_args)
            g_wtx = gprime(wtx, fun_args)
            w1 = (X * gwtx).mean(axis=1) - g_wtx.mean() * w

            _gs_decorrelation(w1, W, j)

            w1 /= np.sqrt((w1 ** 2).sum())

            lim = np.abs(np.abs((w1 * w).sum()) - 1)
            w = w1
            n_iterations = n_iterations + 1

        W[j, :] = w

    return W
Example #15
    def __init__(self, x, n_in, n_out, activation, W=None, b=None):
        self.n_in = n_in
        self.n_out = n_out
        self.input = x

        W_values = numpy.asarray(
            numpy.random.uniform(
                low=-numpy.sqrt(6.0 / (n_in + n_out)), high=numpy.sqrt(6.0 / (n_in + n_out)), size=(n_in, n_out)
            ),
            dtype=theano.config.floatX,
        )

        if W is None:
            self.W = theano.shared(W_values, name="W%dx%d" % (n_in, n_out))
        else:
            self.W = W

        if b is None:
            self.b = theano.shared(numpy.zeros((n_out,), dtype=theano.config.floatX), name="b")
        else:
            self.b = b

        self.linear_output = T.dot(x, self.W)

        self.output = activation(self.linear_output + self.b)
Example #16
    def initial_conditions(self, sim, hx, hy, hz):
        sim.rho[:] = 1.0

        if not self.config.stationary:
            return

        if self.config.drive == "pressure":
            pressure = self.pressure_delta
            if self.config.flow_direction == "x":
                sim.rho[:] = 1.0 + 3.0 * pressure * (self.gx / 2.0 - hx)
            elif self.config.flow_direction == "y":
                sim.rho[:] = 1.0 + 3.0 * pressure * (self.gy / 2.0 - hy)
            else:
                sim.rho[:] = 1.0 + 3.0 * pressure * (self.gz / 2.0 - hz)
        else:
            # Start with correct velocity profile.
            h = -0.5
            radius = self.get_chan_width() / 2.0

            if self.config.flow_direction == "z":
                rc = np.sqrt((hx - self.gx / 2.0 - h) ** 2 + (hy - self.gy / 2.0 - h) ** 2)
                self.sim.vz[rc <= radius] = self._velocity_profile(rc[rc <= radius])
            elif self.config.flow_direction == "y":
                rc = np.sqrt((hx - self.gx / 2.0 - h) ** 2 + (hz - self.gz / 2.0 - h) ** 2)
                self.sim.vy[rc <= radius] = self._velocity_profile(rc[rc <= radius])
            else:
                rc = np.sqrt((hz - self.gz / 2.0 - h) ** 2 + (hy - self.gy / 2.0 - h) ** 2)
                self.sim.vx[rc <= radius] = self._velocity_profile(rc[rc <= radius])
def classify(train, train_labels, test, test_labels, features=None):

    """Nearest neighbour classification"""

    # Use all features if no feature parameter has been supplied
    if features is None:
        features = np.arange(0, train.shape[1])

    # Select the desired features from the training and test data
    train = train[:, features]
    test = test[:, features]

    # Super compact implementation of nearest neighbour
    x = np.dot(test, train.transpose())
    modtest = np.sqrt(np.sum(test * test, axis=1))
    modtrain = np.sqrt(np.sum(train * train, axis=1))
    dist = x / np.outer(modtest, modtrain.transpose())  # cosine similarity
    nearest = np.argmax(dist, axis=1)
    mdist = np.max(dist, axis=1)
    label = train_labels[0, nearest]
    score = (100.0 * sum(test_labels[0, :] == label)) / label.shape[0]

    # Construct a confusion matrix
    nclasses = np.max(np.hstack((test_labels, train_labels)))
    confusions = np.zeros((nclasses, nclasses))
    for i in range(test_labels.shape[1]):
        confusions[test_labels[0, i] - 1, label[i] - 1] += 1

    return score, confusions
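A hypothetical usage sketch with random data; labels are 1-based integers stored as 1 x N row vectors, matching how the function indexes them:

import numpy as np

rng = np.random.RandomState(0)
train, test = rng.rand(200, 30), rng.rand(50, 30)
train_labels = rng.randint(1, 5, size=(1, 200))
test_labels = rng.randint(1, 5, size=(1, 50))

score, confusions = classify(train, train_labels, test, test_labels)
print(score)             # percentage of correctly classified test samples
print(confusions.shape)  # nclasses x nclasses confusion matrix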
    def _get_response_A(position, area, nu, horn, secondary_beam):
        """
        Phase and transmission from the switches to the focal plane.

        Parameters
        ----------
        position : array-like of shape (..., 3)
            The 3D coordinates where the response is computed [m].
        area : array-like
            The integration area, in m^2.
        nu : float
            The frequency for which the response is computed [Hz].
        horn : PackedArray
            The horn layout.
        secondary_beam : Beam
            The secondary beam.

        Returns
        -------
        out : complex array of shape (#positions, #horns)
            The phase and transmission from the horns to the focal plane.

        """
        uvec = position / np.sqrt(np.sum(position ** 2, axis=-1))[..., None]
        thetaphi = Cartesian2SphericalOperator("zenith,azimuth")(uvec)
        sr = -area / position[..., 2] ** 2 * np.cos(thetaphi[..., 0]) ** 3
        tr = np.sqrt(secondary_beam(thetaphi[..., 0], thetaphi[..., 1]) * sr / secondary_beam.solid_angle)[..., None]
        const = 2j * np.pi * nu / c
        product = np.dot(uvec, horn[horn.open].center.T)
        return ne.evaluate("tr * exp(const * product)")
def delta(phase, inc, ecc=0, omega=0):
    """
    Compute the distance center-to-center between planet and host star.
    ___

    INPUT:

    phase: orbital phase (fraction of the orbital period; converted to
           radians internally)
    inc: inclination of the system in radians

    OPTIONAL INPUT:

    ecc: orbital eccentricity (default 0)
    omega: argument of periastron in radians (default 0)

    //
    OUTPUT:

    center-to-center distance (float)
    ___


    """
    phase = 2*np.pi*phase
    if ecc == 0 and omega == 0:
        delta = np.sqrt(1-(np.cos(phase)**2)*(np.sin(inc)**2))
    else:
        delta = (1.-ecc**2.)/(1.-ecc*np.sin(phase-omega))* np.sqrt((1.-(np.cos(phase))**2.*(np.sin(inc))**2))

    return delta
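A hypothetical usage sketch: sky-projected separation over one circular orbit viewed nearly edge-on:

import numpy as np

phase = np.linspace(0.0, 1.0, 200)   # orbital phase in units of the period
inc = np.radians(89.5)               # inclination in radians
sep = delta(phase, inc)              # circular case (ecc = 0, omega = 0)
print(sep.min(), sep.max())          # smallest near mid-transit, largest near quadrature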
Example #20
 def integrandCC(self, ct, s, phi, sigs2, sigx2, betx, bets, wcc):
     k = wcc / cst.clight * 2 * np.pi
     cos2 = np.cos(phi / 2) ** 2
     k2 = k ** 2
     sin2 = np.sin(phi / 2) ** 2
     sigmax2 = sigx2 * (1 + s ** 2 / betx ** 2)
     const = 1.0 / (np.pi * sigs2)
     return (
         const
         * np.exp(
             -ct ** 2 / sigs2
             - s ** 2 * cos2 / sigs2
             - sin2
             / (4 * k2 * sigmax2)
             * (
                 2
                 + 4 * k2 * s ** 2
                 - np.cos(2 * k * (s - ct))
                 - np.cos(2 * k * (s + ct))
                 - 8 * k * s * np.cos(k * ct) * np.sin(k * s)
                 - 4 * np.cos(k * s) ** 2 * np.sin(k * ct) ** 2
             )
         )
         / np.sqrt(1 + s ** 2 / betx ** 2)
         / np.sqrt(1 + s ** 2 / bets ** 2)
     )
Example #21
def transform2(lon, lat, lon0=0, R=1.0):
    """
    Returns the transformation of lon and lat
    on the Mollweide projection.

    Input
      lon: longitude
      lat: latitude
      lon0: central meridian
      R: radius of the globe

    Output
      x: x coordinate (origin at 0,0)
      y: y coordinate (origin at 0,0)
    """
    lon1 = lon - lon0
    if lon0 != 0:
        if lon1 > 180:
            lon1 = -((180 + lon0) + (lon1 - 180))
        elif lon1 < -180:
            lon1 = (180 - lon0) - (lon1 + 180)
    theta = opt_theta(lat)
    x = sqrt(8.0) / pi * R * lon1 * cos(theta)
    x = radians(x)
    y = sqrt(2.0) * R * sin(theta)
    return x, y
Example #22
 def IBS_SRgrowth(self, beam):
     taux = (
         (
             16
             * beam._tuneb
             * (beam._epsxn ** 2)
             * np.sqrt(beam._kappa)
             * np.sqrt(beam._kappa + 1.0)
             * beam._gamma
             * beam._sigs
             * beam._dpp
         )
         / (2.0 * cst.clight * (cst.r0 ** 2) * beam._intensity * 23)
     ) / 3600.0
     steph = float(self._step) / 3600.0
     # Horizontal
     epsx1 = beam._epsxn * (1 + steph / taux) - 2 * steph / beam._taux_sr * (beam._epsxn - beam._epsx0)
     # Vertical
     tauy = (1.0 / beam._kappa_c) * taux
     epsy1 = beam._epsyn * (1 + steph / tauy) - 2 * steph / beam._tauy_sr * (beam._epsyn - beam._epsy0)
     # Longitudinal
     tauz = (1.0 + beam._kappa_c) * taux
     epss = beam._sigs * beam._dpp * (1 + steph / tauz) - 2 * steph / beam._tauz_sr * (
         beam._sigs * beam._dpp - beam._sigs0 * beam._dpp0
     )
     sigs1 = np.sqrt(epss * beam._sigs / beam._dpp)
     dpp1 = np.sqrt(epss * beam._dpp / beam._sigs)
     beam._epsxn = epsx1  # TODO: Bad smell: changing the parameter beam(vimaier)
     beam._epsyn = epsy1
     beam._sigs = sigs1
     beam._dpp = dpp1
Example #23
def logarithmic_negativity(V):
    """
    Calculate the logarithmic negativity given the symmetrized covariance
    matrix, see :func:`qutip.continuous_variables.covariance_matrix`. Note that
    the two-mode field state that is described by `V` must be Gaussian for this
    function to be applicable.

    Parameters
    ----------

    V : *2d array*
        The covariance matrix.

    Returns
    -------

    N: *float*, the logarithmic negativity for the two-mode Gaussian state
    that is described by the Wigner covariance matrix V.

    """

    A = V[0:2, 0:2]
    B = V[2:4, 2:4]
    C = V[0:2, 2:4]

    sigma = np.linalg.det(A) + np.linalg.det(B) - 2 * np.linalg.det(C)
    nu_ = sigma / 2 - np.sqrt(sigma ** 2 - 4 * np.linalg.det(V)) / 2
    if nu_ < 0.0:
        return 0.0
    nu = np.sqrt(nu_)
    lognu = -np.log(2 * nu)
    logneg = max(0, lognu)

    return logneg
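A hedged consistency check using the covariance matrix of a two-mode squeezed vacuum state in the vacuum-variance-1/2 convention used above; its logarithmic negativity should equal 2*r for squeezing parameter r:

import numpy as np

r = 0.5
c, s = np.cosh(2 * r), np.sinh(2 * r)
V = 0.5 * np.array([[c, 0, s, 0],
                    [0, c, 0, -s],
                    [s, 0, c, 0],
                    [0, -s, 0, c]])
print(logarithmic_negativity(V), 2 * r)  # both approximately 1.0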
Example #24
def cubic(c, d):
    """
    Solve x**3 + c * x + d = 0
    """

    c = c.astype(complex)
    d = d.astype(complex)

    q = c / 3.0
    r = -d / 2.0

    delta = q ** 3 + r ** 2

    pos = delta >= 0.0

    s = np.zeros(c.shape, dtype=np.complex)
    t = np.zeros(c.shape, dtype=np.complex)

    if np.sum(pos) > 0:
        s[pos], t[pos] = delta_pos(r[pos], delta[pos])

    if np.sum(~pos) > 0:
        s[~pos], t[~pos] = delta_neg(r[~pos], q[~pos])

    x1 = s + t
    x2 = -(s + t) / 2.0 + np.sqrt(3.0) / 2.0 * (s - t) * 1j
    x3 = -(s + t) / 2.0 - np.sqrt(3.0) / 2.0 * (s - t) * 1j

    return x1, x2, x3
Example #25
    def setUp(self):
        """creates inputs"""
        # an L
        self.data1 = array([[1, 3], [1, 2], [1, 1], [2, 1]], "d")

        # a larger, shifted, mirrored L
        self.data2 = array([[4, -2], [4, -4], [4, -6], [2, -6]], "d")

        # an L shifted up 1, right 1, and with point 4 shifted an extra .5
        # to the right
        # pointwise distance disparity with data1: 3*(2) + (1 + 1.5^2)
        self.data3 = array([[2, 4], [2, 3], [2, 2], [3, 2.5]], "d")

        # data4, data5 are standardized (trace(A*A') = 1).
        # procrustes should return an identical copy if they are used
        # as the first matrix argument.
        shiftangle = pi / 8
        self.data4 = array([[1, 0], [0, 1], [-1, 0], [0, -1]], "d") / sqrt(4)
        self.data5 = array(
            [
                [cos(shiftangle), sin(shiftangle)],
                [cos(pi / 2 - shiftangle), sin(pi / 2 - shiftangle)],
                [-cos(shiftangle), -sin(shiftangle)],
                [-cos(pi / 2 - shiftangle), -sin(pi / 2 - shiftangle)],
            ],
            "d",
        ) / sqrt(4)
Example #26
def velocity_glon_glat(x, y, z, vx, vy, vz):
    """
    Compute projected angular velocity in galactic coordinates.

    Parameters
    ----------
    x : `~astropy.units.Quantity`
        Position in x direction
    y : `~astropy.units.Quantity`
        Position in y direction
    z : `~astropy.units.Quantity`
        Position in z direction
    vx : `~astropy.units.Quantity`
        Velocity in x direction
    vy : `~astropy.units.Quantity`
        Velocity in y direction
    vz : `~astropy.units.Quantity`
        Velocity in z direction

    Returns
    -------
    v_glon : `~astropy.units.Quantity`
        Projected velocity in Galactic longitude
    v_glat : `~astropy.units.Quantity`
        Projected velocity in Galactic latitude
    """
    y_prime = y + d_sun_to_galactic_center
    d = np.sqrt(x ** 2 + y_prime ** 2 + z ** 2)
    r = np.sqrt(x ** 2 + y_prime ** 2)

    v_glon = (-y_prime * vx + x * vy) / r ** 2
    v_glat = vz / (np.sqrt(1 - (z / d) ** 2) * d) - np.sqrt(vx ** 2 + vy ** 2 + vz ** 2) * z / (
        (np.sqrt(1 - (z / d) ** 2) * d ** 2)
    )
    return v_glon * Unit("rad"), v_glat * Unit("rad")
def findLatticeConstant(calc):
    """
    temporary copy of Alex's LatticeConstantCubicEnergy Test
    in the future we want to look up the result of this as input to the test
    """

    XTOL = 1e-8

    nn_dist_lookup = {"sc": 1.0, "fcc": 1.0 / np.sqrt(2), "bcc": np.sqrt(3) / 2.0, "diamond": np.sqrt(3) / 4.0}

    nn_dist = nn_dist_lookup[lattice]

    atoms = bulk(symbol, lattice, a=100)
    atoms.set_calculator(calc)
    cutoff = KIM_API_get_data_double(calc.pkim, "cutoff")[0]

    min_a = (cutoff / 30.0) / nn_dist
    max_a = cutoff / nn_dist

    aopt, eopt, ier, funccalls = opt.fminbound(calcEnergy, min_a, max_a, args=(calc,), full_output=True, xtol=XTOL)

    # results = opt.fmin(calcEnergy, cutoff/2.0, args=(calc,))[0]

    hit_bound = False
    if np.allclose(aopt, min_a, atol=2 * XTOL):
        hit_bound = True
    elif np.allclose(aopt, max_a, atol=2 * XTOL):
        hit_bound = True

    if hit_bound:
        raise Exception("Lattice constant computation hit bounds")

    return aopt
Example #28
def test_binom_conf_interval():

    # Test Wilson and Jeffreys interval for corner cases:
    # Corner cases: k = 0, k = n, conf = 0., conf = 1.
    n = 5
    k = [0, 4, 5]
    for conf in [0.0, 0.5, 1.0]:
        res = funcs.binom_conf_interval(k, n, conf=conf, interval="wilson")
        assert ((res >= 0.0) & (res <= 1.0)).all()
        res = funcs.binom_conf_interval(k, n, conf=conf, interval="jeffreys")
        assert ((res >= 0.0) & (res <= 1.0)).all()

    # Test Jeffreys interval accuracy against table in Brown et al. (2001).
    # (See `binom_conf_interval` docstring for reference.)
    k = [0, 1, 2, 3, 4]
    n = 7
    conf = 0.95
    result = funcs.binom_conf_interval(k, n, conf=conf, interval="jeffreys")
    table = np.array([[0.000, 0.016, 0.065, 0.139, 0.234], [0.292, 0.501, 0.648, 0.766, 0.861]])
    assert_allclose(result, table, atol=1.0e-3, rtol=0.0)

    # Test Wald interval
    result = funcs.binom_conf_interval(0, 5, interval="wald")
    assert_allclose(result, 0.0)  # conf interval is [0, 0] when k = 0
    result = funcs.binom_conf_interval(5, 5, interval="wald")
    assert_allclose(result, 1.0)  # conf interval is [1, 1] when k = n
    result = funcs.binom_conf_interval(500, 1000, conf=0.68269, interval="wald")
    assert_allclose(result[0], 0.5 - 0.5 / np.sqrt(1000.0))
    assert_allclose(result[1], 0.5 + 0.5 / np.sqrt(1000.0))
Example #29
    def write_v_sim(self, amplitude=1.0, factor=VaspToTHz, filename="anime.ascii"):
        self._set_cell_oriented()
        lat = self._lattice_oriented
        q = self._qpoint
        text = "# Phonopy generated file for v_sim 3.6\n"
        text += "%15.9f%15.9f%15.9f\n" % (lat[0, 0], lat[1, 0], lat[1, 1])
        text += "%15.9f%15.9f%15.9f\n" % (lat[2, 0], lat[2, 1], lat[2, 2])
        for s, p in zip(self._symbols, self._positions_oriented):
            text += "%15.9f%15.9f%15.9f %2s\n" % (p[0], p[1], p[2], s)

        for i, val in enumerate(self._eigenvalues):
            if val > 0:
                omega = np.sqrt(val)
            else:
                omega = -np.sqrt(-val)
            self._set_displacements(i)
            text += "#metaData: qpt=[%f;%f;%f;%f \\\n" % (q[0], q[1], q[2], omega * factor)
            for u in self._get_oriented_displacements(self._displacements) * amplitude:
                text += "#; %f; %f; %f; %f; %f; %f \\\n" % (
                    u[0].real,
                    u[1].real,
                    u[2].real,
                    u[0].imag,
                    u[1].imag,
                    u[2].imag,
                )
            text += "# ]\n"
        with open(filename, "w") as w:
            w.write(text)
Example #30
    def __init__(self, rng, input, n_in, n_out, activation, W=None, b=None, use_bias=False):

        self.input = input
        self.activation = activation

        if W is None:
            if activation.__name__ == "ReLU":
                W_values = numpy.asarray(0.01 * rng.standard_normal(size=(n_in, n_out)), dtype=theano.config.floatX)
            else:
                W_values = numpy.asarray(
                    rng.uniform(
                        low=-numpy.sqrt(6.0 / (n_in + n_out)), high=numpy.sqrt(6.0 / (n_in + n_out)), size=(n_in, n_out)
                    ),
                    dtype=theano.config.floatX,
                )
            W = theano.shared(value=W_values, name="W")
        if b is None:
            b_values = numpy.zeros((n_out,), dtype=theano.config.floatX)
            b = theano.shared(value=b_values, name="b")

        self.W = W
        self.b = b

        if use_bias:
            lin_output = T.dot(input, self.W) + self.b
        else:
            lin_output = T.dot(input, self.W)

        self.output = lin_output if activation is None else activation(lin_output)

        # parameters of the model
        if use_bias:
            self.params = [self.W, self.b]
        else:
            self.params = [self.W]