Example #1
def data_snr_maximized_extrinsic(frequencies,
                                 data,
                                 detector,
                                 chirpm,
                                 symmratio,
                                 spin1,
                                 spin2,
                                 Luminosity_Distance,
                                 theta,
                                 phi,
                                 iota,
                                 alpha_squared,
                                 bppe,
                                 NSflag,
                                 cosmology=cosmology.Planck15):
    noise_temp, noisefunc, f = IMRPhenomD.populate_noise(detector,
                                                         int_scheme='quad')
    noise = noisefunc(frequencies)**2

    template_detector_response = detector_response_dCS(
        frequencies, chirpm, symmratio, spin1, spin2, Luminosity_Distance,
        theta, phi, iota, alpha_squared, bppe, NSflag, cosmology)
    int1 = 4 * simps((np.conjugate(template_detector_response) *
                      template_detector_response).real / noise, frequencies)
    snr_template = np.sqrt(int1)

    g_tilde = 4 * np.divide(
        np.multiply(np.conjugate(data), template_detector_response), noise)
    g = np.fft.ifft(g_tilde)
    gmag = np.abs(g)
    deltaf = frequencies[1] - frequencies[0]
    maxg = np.amax(gmag).real * (len(frequencies)) * (deltaf)
    return maxg / snr_template
Example #2
    def cost(self, controls, states, system_eval_step):
        """
        Compute the penalty.

        Arguments:
        controls
        states
        system_eval_step

        Returns:
        cost
        """
        # The cost is the infidelity of each evolved state and its target state.
        if not self.neglect_relative_phase:
            inner_products = anp.matmul(self.target_states_dagger,
                                        states)[:, 0, 0]
            inner_products_sum = anp.sum(inner_products)
            fidelity_normalized = anp.real(
                inner_products_sum *
                anp.conjugate(inner_products_sum)) / self.state_count**2
            infidelity = 1 - fidelity_normalized
            # Normalize the cost for the number of times the cost is evaluated.
            cost_normalized = infidelity / self.cost_eval_count
        else:
            inner_products = anp.matmul(self.target_states_dagger,
                                        states)[:, 0, 0]
            fidelities = anp.real(inner_products *
                                  anp.conjugate(inner_products))
            fidelity_normalized = anp.sum(fidelities) / self.state_count
            infidelity = 1 - fidelity_normalized
            # Normalize the cost for the number of times the cost is evaluated.
            cost_normalized = infidelity / self.cost_eval_count
        return cost_normalized * self.cost_multiplier
Example #3
    def log_loss(K_conj):
        """
            K_conj is a tensor of CONJUGATE Kraus operators with shape (s, w, n, n)
            s: output_dim
            w: ops_per_output
            n: state_dim
        """
        total_loss = 0.0

        # Iterate over each sequence in batch
        for i in range(batch.shape[0]):
            seq = batch[i]

            rho_new = np.log(rho.copy())

            # burn in
            for b in range(burn_in):
                temp_rho = np.zeros(
                    [K_conj.shape[1], K_conj.shape[2], K_conj.shape[3]],
                    dtype='complex128')
                for w in range(K_conj.shape[1]):
                    temp_rho[w, :, :] = np.dot(
                        np.dot(K[int(seq[b]) - 1, w, :, :], rho_new),
                        np.conjugate(K[int(seq[b]) - 1, w, :, :]).T)
                rho_new = np.sum(temp_rho, 0)
                rho_new = rho_new / np.trace(rho_new)

            # Compute likelihood for the sequence
            for s in seq[burn_in:]:
                rho_sum = logdotexp(
                    logdotexp(
                        np.log(np.conjugate(K_conj[int(s) - 1, 0, :, :])),
                        rho_new), np.log(K_conj[int(s) - 1, 0, :, :].T))
                for w in range(1, K_conj.shape[1]):
                    # subtract 1 to adjust for MATLAB indexing
                    rho_sum = logaddexp(
                        rho_sum,
                        logdotexp(
                            logdotexp(
                                np.log(
                                    np.conjugate(K_conj[int(s) - 1,
                                                        w, :, :])), rho_new),
                            np.log(K_conj[int(s) - 1, w, :, :].T)))

                rho_new = rho_sum

            total_loss += np.real(logsumexp(np.diag(rho_new)))

        return -total_loss / batch.shape[0]
Example #4
def matrix_pnorm_condition_number_autograd(x1, y1, x2, y2, x3, y3, x4, y4, x5,
                                           y5, x6, y6, P):
    A = calculate_A_matrix_autograd(x1, y1, x2, y2, x3, y3, x4, y4, x5, y5, x6,
                                    y6, P)
    A = np.conjugate(A)
    U, s, Vt = np.linalg.svd(A, 0)
    m = U.shape[0]
    n = Vt.shape[1]
    rcond = 1e-10
    cutoff = rcond * np.max(s)

    #    for i in range(min(n, m)):
    #        if s[i] > cutoff:
    #            s[i] = 1./s[i]
    #        else:
    #            s[i] = 0.
    new_s = list()
    for i in range(min(n, m)):
        if s[i] > cutoff:
            new_s.append(1. / s[i])
        else:
            new_s.append(0.)
    new_s = np.array(new_s)
    pinv = np.dot(Vt.T, np.multiply(new_s[:, np.newaxis], U.T))
    #https://de.mathworks.com/help/symbolic/cond.html?requestedDomain=www.mathworks.com
    return np.linalg.norm(A) * np.linalg.norm(pinv)
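A minimal sketch of the SVD-based pseudoinverse pattern used above, checked against numpy's reference implementation; the matrix and cutoff here are illustrative and not from the source:

import numpy as np

rng = np.random.default_rng(0)
A = rng.normal(size=(12, 6))
U, s, Vt = np.linalg.svd(A, 0)
cutoff = 1e-10 * np.max(s)
# invert the singular values above the cutoff, zero the rest
s_inv = np.where(s > cutoff, 1. / s, 0.)
pinv = np.dot(Vt.T, np.multiply(s_inv[:, np.newaxis], U.T))
assert np.allclose(pinv, np.linalg.pinv(A))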
Example #5
    def cost(self, controls, states, system_eval_step):
        """
        Compute the penalty.

        Arguments:
        controls
        states
        system_eval_step

        Returns:
        cost
        """
        if self.max_control_norms is not None:
            normalized_controls = controls / self.max_control_norms
        else:
            normalized_controls = controls

        # Penalize the square of the absolute value of the difference
        # in value of the control parameters from one step to the next.
        diffs = anp.diff(normalized_controls, axis=0, n=self.order)
        cost = anp.sum(anp.real(diffs * anp.conjugate(diffs)))
        # By the triangle inequality, the complex modulus of the difference
        # between two complex values is at most 2 when each value has modulus
        # at most 1; an order-n difference is likewise bounded by 2**n.
        # Therefore, a factor of 2 per difference order should be used to
        # normalize the diffs (a numerical check follows this method).
        cost_normalized = cost / self.cost_normalization_constant

        return cost_normalized * self.cost_multiplier
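A small numerical check of the bound mentioned in the comment above, using illustrative unit-modulus values that are not taken from the source:

import numpy as np

rng = np.random.default_rng(1)
z = np.exp(1j * rng.uniform(0, 2 * np.pi, size=100))  # values with modulus 1
for order in (1, 2, 3):
    diffs = np.diff(z, n=order)
    # an order-n difference of unit-modulus values has modulus at most 2**n
    assert np.all(np.abs(diffs) <= 2.0**order + 1e-12)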
Example #6
    def cost(self, controls, states, system_eval_step):
        """
        Compute the penalty.

        Arguments:
        controls
        states
        system_eval_step

        Returns:
        cost
        """
        # The cost is the overlap (fidelity) of the evolved state and each
        # forbidden state.
        cost = 0
        for i, forbidden_states_dagger_ in enumerate(
                self.forbidden_states_dagger):
            state = states[i]
            state_cost = 0
            for forbidden_state_dagger in forbidden_states_dagger_:
                inner_product = anp.matmul(forbidden_state_dagger, state)[0, 0]
                fidelity = anp.real(inner_product *
                                    anp.conjugate(inner_product))
                state_cost = state_cost + fidelity
            #ENDFOR
            state_cost_normalized = state_cost / self.forbidden_states_count[i]
            cost = cost + state_cost_normalized
        #ENDFOR

        # Normalize the cost for the number of evolving states
        # and the number of times the cost is computed.
        cost_normalized = cost / self.cost_normalization_constant

        return cost_normalized * self.cost_multiplier
Example #7
    def losswlog(K_conj):
        """
            K_conj is a tensor of CONJUGATE Kraus operators with shape (s, y, x, x)
            s: dim of features
            y: number of features
            x: number of labels
        """
        total_loss = 0.0

        # Iterate over each sequence in batch
        for i in range(labels.shape[0]):
            features = feats_matrix[i, :]
            label = labels[i] - 1

            # Compute likelihood of the label generating the given features
            conjKrausProduct = K_conj[features[0] - 1, 0, :, :]
            for s in range(1, features.shape[0]):
                conjKrausProduct = np.dot(K_conj[features[s] - 1, s, :, :],
                                          conjKrausProduct)

            eta = np.zeros([K_conj.shape[3], K_conj.shape[3]],
                           dtype='complex128')
            eta[label, label] = 1

            prod1 = np.dot(np.conjugate(conjKrausProduct), eta)
            prod2 = np.dot(prod1, conjKrausProduct.T)
            total_loss += np.log(np.real(np.trace(prod2)))

            # total_loss += np.real(np.trace(np.kron(np.conjugate(conjKrausProduct)[:, label], conjKrausProduct.T[:, label]).reshape(K_conj.shape[2], K_conj.shape[3])))

        return -total_loss / labels.shape[0]
Example #8
    def test_inner(self):
        X = self.man.rand()
        G = self.man.randvec(X)
        H = self.man.randvec(X)
        np_testing.assert_almost_equal(
            np.real(np.trace(np.conjugate(G.T) @ H)),
            self.man.inner(X, G, H))
        assert np.isreal(self.man.inner(X, G, H))
Example #9
def molecular_density_matrix(n_electron, c):
    r"""Compute the molecular density matrix.

    The density matrix :math:`P` is computed from the molecular orbital coefficients :math:`C` as

    .. math::

        P_{\mu \nu} = \sum_{i=1}^{N} C_{\mu i} C_{\nu i},

    where :math:`N = N_{electrons} / 2` is the number of occupied orbitals. Note that the total
    density matrix is the sum of the :math:`\alpha` and :math:`\beta` density
    matrices, :math:`P = P^{\alpha} + P^{\beta}`.

    Args:
        n_electron (integer): number of electrons
        c (array[array[float]]): molecular orbital coefficients

    Returns:
        array[array[float]]: density matrix

    **Example**

    >>> c = np.array([[-0.54828771,  1.21848441], [-0.54828771, -1.21848441]])
    >>> n_electron = 2
    >>> molecular_density_matrix(n_electron, c)
    array([[0.30061941, 0.30061941], [0.30061941, 0.30061941]])
    """
    p = anp.dot(c[:, :n_electron // 2],
                anp.conjugate(c[:, :n_electron // 2]).T)
    return p
Example #10
def autocov(samples, axis=-1):
    """Compute autocovariance estimates for every lag for the input array.

    Parameters
    ----------
    samples : `numpy.ndarray(n_chains, n_iters)`
        An array containing samples
    axis : int
        The axis along which to compute the autocovariance

    Returns
    -------
    acov: `numpy.ndarray`
        Autocovariance of samples that has same size as the input array
    """
    axis = axis if axis > 0 else len(samples.shape) + axis
    n = samples.shape[axis]
    m = next_fast_len(2 * n)

    samples = samples - samples.mean(axis, keepdims=True)

    # added to silence tuple warning for a submodule
    with warnings.catch_warnings():
        warnings.simplefilter("ignore")

        ifft_samp = np.fft.rfft(samples, n=m, axis=axis)
        ifft_samp *= np.conjugate(ifft_samp)

        shape = tuple(
            slice(None) if dim_len != axis else slice(0, n)
            for dim_len, _ in enumerate(samples.shape))
        cov = np.fft.irfft(ifft_samp, n=m, axis=axis)[shape]
        cov /= n
        return cov
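A hedged usage sketch. The helpers `next_fast_len` and `warnings` are not shown in the snippet; `scipy.fft.next_fast_len` is assumed as the provider here, and the input chains are illustrative:

import numpy as np
from scipy.fft import next_fast_len  # assumed source of next_fast_len

chains = np.random.default_rng(0).normal(size=(4, 1000))
acov = autocov(chains)
# the zero-lag autocovariance equals the biased (ddof=0) per-chain variance
assert np.allclose(acov[..., 0], chains.var(axis=-1))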
Example #11
    def cost(self, controls, states, system_eval_step):
        """
        Compute the penalty.

        Arguments:
        controls
        states
        system_eval_step

        Returns:
        cost
        """
        # Normalize the controls.
        if self.max_control_norms is not None:
            controls = controls / self.max_control_norms
            
        # Weight the controls.
        if self.control_weights is not None:
            controls = controls * self.control_weights

        # The cost is the sum of the square of the modulus of the normalized,
        # weighted, controls.
        cost = anp.sum(anp.real(controls * anp.conjugate(controls)))
        cost_normalized = cost / self.controls_size
        
        return cost_normalized * self.cost_multiplier
Example #12
    def test_inner(self):
        X = self.man.rand()
        G = self.man.randvec(X)
        H = self.man.randvec(X)
        np_testing.assert_allclose(
            np.real(np.sum(np.conjugate(G) * H)),
            self.man.inner(X, G, H))
        assert np.isreal(self.man.inner(X, G, H))
Example #13
    def test_inner_product(self):
        X = self.manifold.random_point()
        G = self.manifold.random_tangent_vector(X)
        H = self.manifold.random_tangent_vector(X)
        np_testing.assert_allclose(
            np.real(np.sum(np.conjugate(G) * H)),
            self.manifold.inner_product(X, G, H),
        )
        assert np.isreal(self.manifold.inner_product(X, G, H))
Example #14
    def test_inner_product(self):
        X = self.manifold.random_point()
        G = self.manifold.random_tangent_vector(X)
        H = self.manifold.random_tangent_vector(X)
        np_testing.assert_almost_equal(
            np.real(np.trace(np.conjugate(G.T) @ H)),
            self.manifold.inner_product(X, G, H),
        )
        assert np.isreal(self.manifold.inner_product(X, G, H))
Example #15
def log_likelihood_detector_frame_SNR(Data,
                                      frequencies,
                                      noise,
                                      SNR,
                                      t_c,
                                      phi_c,
                                      chirpm,
                                      symmratio,
                                      spin1,
                                      spin2,
                                      alpha_squared,
                                      bppe,
                                      NSflag,
                                      detector,
                                      cosmology=cosmology.Planck15):
    mass1 = utilities.calculate_mass1(chirpm, symmratio)
    mass2 = utilities.calculate_mass2(chirpm, symmratio)
    DL = 100 * mpc
    model = dcsimr_detector_frame(mass1=mass1,
                                  mass2=mass2,
                                  spin1=spin1,
                                  spin2=spin2,
                                  collision_time=t_c,
                                  collision_phase=phi_c,
                                  Luminosity_Distance=DL,
                                  phase_mod=alpha_squared,
                                  cosmo_model=cosmology,
                                  NSflag=NSflag)
    #model = imrdf(mass1=mass1,mass2=mass2, spin1=spin1,spin2=spin2, collision_time=t_c,collision_phase=phi_c,
    #                Luminosity_Distance=DL, cosmo_model=cosmology,NSflag=NSflag)
    frequencies = np.asarray(frequencies)
    model.fix_snr_series(snr_target=SNR,
                         detector=detector,
                         frequencies=frequencies)

    amp, phase, hreal = model.calculate_waveform_vector(frequencies)
    #h_complex = np.multiply(amp,np.add(np.cos(phase),-1j*np.sin(phase)))
    h_complex = amp * np.exp(-1j * phase)
    #noise_temp,noise_func, freq = model.populate_noise(detector=detector,int_scheme='quad')
    resid = np.subtract(Data, h_complex)
    #integrand_numerator = np.multiply(np.conjugate(Data), h_complex) + np.multiply(Data,np.conjugate( h_complex))
    integrand_numerator = np.multiply(resid, np.conjugate(resid))

    #noise_root =noise_func(frequencies)
    #noise = np.multiply(noise_root, noise_root)
    integrand = np.divide(integrand_numerator, noise)
    integral = np.real(simps(integrand, frequencies))
    #integral = np.real(np.sum(integrand))
    return -2 * integral
Example #16
def conjugate_transpose(matrix):
    """
    Compute the conjugate transpose of a matrix.
    Args:
    matrix :: numpy.ndarray - the matrix to compute
        the conjugate transpose of
    Returns:
    conjugate_transpose_ :: numpy.ndarray - the conjugate transpose
        of matrix
    """
    conjugate_transpose_ = anp.conjugate(anp.swapaxes(matrix, -1, -2))

    return conjugate_transpose_
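A hedged usage sketch with an illustrative matrix: for a 2-D input the result should match numpy's conj().T, while the swapaxes form above also covers batched (..., N, N) inputs:

import autograd.numpy as anp

m = anp.array([[1 + 2j, 3], [4j, 5]])
assert anp.allclose(conjugate_transpose(m), m.conj().T)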
Example #17
def rms_norm(array):
    """
    Compute the rms norm of the array.

    Arguments:
    array :: ndarray (N) - The array to compute the norm of.

    Returns:
    norm :: float - The rms norm of the array.
    """
    square_norm = anp.sum(array * anp.conjugate(array))
    size = anp.prod(anp.shape(array))
    rms_norm_ = anp.sqrt(square_norm / size)

    return rms_norm_
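A hedged equivalence check on an illustrative complex array: the value above should equal the flat 2-norm divided by the square root of the element count (the result carries a zero imaginary part, since the summand is complex-typed):

import numpy as np

x = np.array([[1 + 1j, 2], [0, -3j]])
assert np.isclose(rms_norm(x), np.linalg.norm(x) / np.sqrt(x.size))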
Example #18
File: expm.py Project: scarab187/pymack04
def expm_eigh(h):
    """
    Compute the unitary operator of a hermitian matrix.
    U = expm(-1j * h)

    Arguments:
    h :: ndarray (N X N) - The matrix to exponentiate, which must be hermitian.
    
    Returns:
    expm_h :: ndarray(N x N) - The unitary operator of h.
    """
    eigvals, p = anp.linalg.eigh(h)
    p_dagger = anp.conjugate(anp.swapaxes(p, -1, -2))
    d = anp.exp(-1j * eigvals)
    return anp.matmul(p * d, p_dagger)
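A hedged check against scipy on a small illustrative Hermitian matrix (scipy is not used by the snippet itself):

import numpy as np
from scipy.linalg import expm

h = np.array([[1.0, 0.5j], [-0.5j, 2.0]])  # Hermitian
u = expm_eigh(h)
assert np.allclose(u, expm(-1j * h))
assert np.allclose(u @ np.conjugate(u).T, np.eye(2))  # u is unitary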
Example #19
def log_likelihood_Full(Data,
                        frequencies,
                        A0,
                        t_c,
                        phi_c,
                        chirpm,
                        symmratio,
                        chi_s,
                        chi_a,
                        beta,
                        bppe,
                        NSflag,
                        N_detectors,
                        detector,
                        cosmology=cosmology.Planck15):
    DL = ((np.pi / 30)**(1 / 2) / A0) * chirpm**2 * (np.pi * chirpm)**(-7 / 6)
    Z = Distance(DL / mpc, unit=u.Mpc).compute_z(cosmology=cosmology)
    chirpme = chirpm / (1 + Z)
    mass1 = utilities.calculate_mass1(chirpme, symmratio)
    mass2 = utilities.calculate_mass2(chirpme, symmratio)
    chi1 = chi_s + chi_a
    chi2 = chi_s - chi_a
    model = modimrins(mass1=mass1,
                      mass2=mass2,
                      spin1=chi1,
                      spin2=chi2,
                      collision_time=t_c,
                      collision_phase=phi_c,
                      Luminosity_Distance=DL,
                      phase_mod=beta,
                      bppe=bppe,
                      cosmo_model=cosmology,
                      NSflag=NSflag,
                      N_detectors=N_detectors)
    frequencies = np.asarray(frequencies)
    amp, phase, hreal = model.calculate_waveform_vector(frequencies)
    h_complex = np.multiply(amp, np.add(np.cos(phase), 1j * np.sin(phase)))
    noise_temp, noise_func, freq = model.populate_noise(detector=detector,
                                                        int_scheme='quad')
    resid = np.subtract(Data, h_complex)
    #integrand_numerator = np.multiply(np.conjugate(Data), h_complex) + np.multiply(Data,np.conjugate( h_complex))
    integrand_numerator = np.multiply(resid, np.conjugate(resid))

    noise_root = noise_func(frequencies)
    noise = np.multiply(noise_root, noise_root)
    integrand = np.divide(integrand_numerator, noise)
    integral = np.real(simps(integrand, frequencies))
    return -2 * integral
Example #20
def conjugate(x, operation_policy=OperationPolicy.CPU):
    """
    Compute the conjugate of a value.
    
    Args:
    x :: ndarray - the value to compute the conjugate of
    operation_policy :: qoc.OperationPolicy - what data type is
        used to perform the operation and with which method
    
    Returns:
    conj :: ndarray - the conjugate of x
    """
    if operation_policy == OperationPolicy.CPU:
        conj = anp.conjugate(x)
    else:
        raise NotImplementedError(
            "operation_policy {} is not supported".format(operation_policy))

    return conj
Example #21
def log_likelihood(Data,
                   frequencies,
                   DL,
                   t_c,
                   phi_c,
                   chirpm,
                   symmratio,
                   spin1,
                   spin2,
                   alpha_squared,
                   bppe,
                   NSflag,
                   N_detectors,
                   detector,
                   cosmology=cosmology.Planck15):
    #Z = Distance(DL/mpc,unit=u.Mpc).compute_z(cosmology = cosmology)
    #chirpme = chirpm/(1+Z)
    mass1 = utilities.calculate_mass1(chirpm, symmratio)
    mass2 = utilities.calculate_mass2(chirpm, symmratio)
    model = dcsimr_detector_frame(mass1=mass1,
                                  mass2=mass2,
                                  spin1=spin1,
                                  spin2=spin2,
                                  collision_time=t_c,
                                  collision_phase=phi_c,
                                  Luminosity_Distance=DL,
                                  phase_mod=alpha_squared,
                                  cosmo_model=cosmology,
                                  NSflag=NSflag,
                                  N_detectors=N_detectors)
    frequencies = np.asarray(frequencies)
    amp, phase, hreal = model.calculate_waveform_vector(frequencies)
    #h_complex = np.multiply(amp,np.add(np.cos(phase),-1j*np.sin(phase)))
    h_complex = amp * np.exp(-1j * phase)
    noise_temp, noise_func, freq = model.populate_noise(detector=detector,
                                                        int_scheme='quad')
    resid = np.subtract(Data, h_complex)
    #integrand_numerator = np.multiply(np.conjugate(Data), h_complex) + np.multiply(Data,np.conjugate( h_complex))
    integrand_numerator = np.multiply(resid, np.conjugate(resid))

    noise_root = noise_func(frequencies)
    noise = np.multiply(noise_root, noise_root)
    integrand = np.divide(integrand_numerator, noise)
    integral = np.real(simps(integrand, frequencies))
    return -2 * integral
Example #22
def autocorr_function(x):
    """Estimate the normalized autocorrelation function of a 1-D series
    .. note:: This is from `emcee <https://github.com/dfm/emcee>`_.
    Args:
        x: The series as a 1-D numpy array.
    Returns:
        The autocorrelation function of the time series.
    """
    x = np.atleast_1d(x)
    if len(x.shape) != 1:
        raise ValueError("invalid dimensions for 1D autocorrelation function")
    n = next_pow_two(len(x))

    # Compute the FFT and then (from that) the auto-correlation function
    f = np.fft.fft(x - np.mean(x), n=2 * n)
    acf = np.fft.ifft(f * np.conjugate(f))[:len(x)].real
    acf /= acf[0]
    return acf
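A hedged usage sketch. The helper `next_pow_two` is not shown above; a minimal version matching emcee's behaviour is assumed, and the series is illustrative:

import numpy as np

def next_pow_two(n):  # assumed helper: smallest power of two >= n
    i = 1
    while i < n:
        i <<= 1
    return i

x = np.random.default_rng(0).normal(size=500)
acf = autocorr_function(x)
assert np.isclose(acf[0], 1.0)  # normalized so the zero-lag value is one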
Example #23
def conjugate_transpose(matrix, operation_policy=OperationPolicy.CPU):
    """
    Compute the conjugate transpose of a matrix.
    Args:
    matrix :: numpy.ndarray - the matrix to compute
        the conjugate transpose of
    operation_policy :: qoc.OperationPolicy - what data type is
        used to perform the operation and with which method
    Returns:
    _conjugate_transpose :: numpy.ndarray - the conjugate transpose
        of matrix
    """
    if operation_policy == OperationPolicy.CPU:
        _conjugate_transpose = anp.conjugate(transpose(matrix,
                                                       operation_policy))
    else:
        raise NotImplementedError(
            "operation_policy {} is not supported".format(operation_policy))
    
    return _conjugate_transpose
Example #24
    def cost(self, controls, states, system_eval_step):
        """
        Compute the penalty.

        Arguments:
        controls
        states
        system_eval_step

        Returns:
        cost
        """
        # The cost is the infidelity of each evolved state and its target state.
        inner_products = anp.matmul(self.target_states_dagger, states)[:, 0, 0]
        fidelities = anp.real(inner_products * anp.conjugate(inner_products))
        fidelity_normalized = anp.sum(fidelities) / self.state_count
        infidelity = 1 - fidelity_normalized

        return infidelity * self.cost_multiplier
Example #25
    def cost(self, controls, densities, system_eval_step):
        """
        Compute the penalty.

        Arguments:
        controls
        densities
        system_eval_step
        
        Returns:
        cost
        """
        # The cost is the overlap (fidelity) of the evolved density and each
        # forbidden density.
        cost = 0
        for i, forbidden_densities_dagger_ in enumerate(
                self.forbidden_densities_dagger):
            density = densities[i]
            density_cost = 0
            for forbidden_density_dagger in forbidden_densities_dagger_:
                inner_product = (
                    anp.trace(anp.matmul(forbidden_density_dagger, density)) /
                    self.hilbert_size)
                fidelity = anp.real(inner_product *
                                    anp.conjugate(inner_product))
                density_cost = density_cost + fidelity
            #ENDFOR
            density_cost_normalized = (density_cost /
                                       self.forbidden_densities_count[i])
            cost = cost + density_cost_normalized
        #ENDFOR

        # Normalize the cost for the number of evolving densities
        # and the number of times the cost is computed.
        cost_normalized = cost / self.cost_normalization_constant

        return cost_normalized * self.cost_multiplier
Example #26
N_T = matmuls(A_DAGGER_T, A_T)

HILBERT_SIZE = CAVITY_STATE_COUNT * TRANSMON_STATE_COUNT
H_SYSTEM_0 = (
    np.divide(KAPPA, 2) * np.kron(A_DAGGER_2_A_2_C, I_T) +
    np.divide(ALPHA, 2) * np.kron(I_C, A_DAGGER_2_A_2_T) + CHI_E *
    np.kron(N_C, np.matmul(TRANSMON_E, conjugate_transpose(TRANSMON_E))) +
    CHI_F *
    np.kron(N_C, np.matmul(TRANSMON_F, conjugate_transpose(TRANSMON_F))))
# + np.divide(CHI_PRIME, 2) * np.kron(A_DAGGER_2_A_2_C, N_T))
H_CONTROL_0_0 = np.kron(A_C, I_T)
H_CONTROL_0_1 = np.kron(A_DAGGER_C, I_T)
H_CONTROL_1_0 = np.kron(I_C, A_T)
H_CONTROL_1_1 = np.kron(I_C, A_DAGGER_T)
hamiltonian = (lambda params, t: H_SYSTEM_0 + params[
    0] * H_CONTROL_0_0 + anp.conjugate(params[0]) * H_CONTROL_0_1 + params[1] *
               H_CONTROL_1_0 + anp.conjugate(params[1]) * H_CONTROL_1_1)

# Define the optimization.
PARAM_COUNT = 2
MAX_PARAM_NORMS = (
    MAX_AMP_C,
    MAX_AMP_T,
)
OPTIMIZER = Adam()
PULSE_TIME = 500
PULSE_STEP_COUNT = 500
ITERATION_COUNT = 5000

# Define the problem.
INITIAL_STATE_0 = np.kron(CAVITY_ZERO, TRANSMON_G)
Example #27
def hamiltonian(controls, time):
    return (SYSTEM_HAMILTONIAN
            + controls[0] * CONTROL_0
            + anp.conjugate(controls[0]) * CONTROL_0_DAGGER
            + controls[1] * CONTROL_1
            + anp.conjugate(controls[1]) * CONTROL_1_DAGGER)
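A hedged sketch with hypothetical 2x2 operators (none of these constants come from the source): when each *_DAGGER constant is the conjugate transpose of its partner and SYSTEM_HAMILTONIAN is Hermitian, the hamiltonian above is Hermitian for any complex control amplitudes:

import autograd.numpy as anp

SYSTEM_HAMILTONIAN = anp.array([[1.0, 0.0], [0.0, -1.0]])  # hypothetical, Hermitian
CONTROL_0 = anp.array([[0.0, 1.0], [0.0, 0.0]])
CONTROL_0_DAGGER = anp.conjugate(CONTROL_0.T)
CONTROL_1 = anp.array([[0.0, 1j], [0.0, 0.0]])
CONTROL_1_DAGGER = anp.conjugate(CONTROL_1.T)

h = hamiltonian(anp.array([0.3 + 0.4j, -0.1j]), 0.0)
assert anp.allclose(h, anp.conjugate(h.T))  # Hermitian for complex amplitudes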
Example #28
def rmsNorm(x):
    squareNorm = np.sum(x * np.conjugate(x))
    size = np.prod(x.shape)
    return np.sqrt(squareNorm / size)
Example #29
TRANSMON_F = anp.array(((0, ), (0, ), (1, )))
TRANSMON_F_DAGGER = conjugate_transpose(TRANSMON_F)
TRANSMON_I = anp.eye(TRANSMON_STATE_COUNT)

H_SYSTEM = (
    CHI_E * anp.kron(anp.matmul(TRANSMON_E, TRANSMON_E_DAGGER), CAVITY_NUMBER)
    +
    CHI_F * anp.kron(anp.matmul(TRANSMON_F, TRANSMON_F_DAGGER), CAVITY_NUMBER))
H_GE = anp.kron(anp.matmul(TRANSMON_G, TRANSMON_E_DAGGER), CAVITY_I)
H_GE_DAGGER = conjugate_transpose(H_GE)
H_EF = anp.kron(anp.matmul(TRANSMON_E, TRANSMON_F_DAGGER), CAVITY_I)
H_EF_DAGGER = conjugate_transpose(H_EF)
H_C = anp.kron(TRANSMON_I, CAVITY_ANNIHILATE)
H_C_DAGGER = conjugate_transpose(H_C)
hamiltonian = (
    lambda params, t: H_SYSTEM + params[0] * H_GE + anp.conjugate(params[
        0]) * H_GE_DAGGER + params[1] * H_EF + anp.conjugate(params[1]) *
    H_EF_DAGGER + params[2] * H_C + anp.conjugate(params[2]) * H_C_DAGGER)
MAX_PARAM_NORMS = anp.array((
    MAX_AMP_T,
    MAX_AMP_T,
    MAX_AMP_C,
))

# Define the optimization.
ITERATION_COUNT = 1000
PARAM_COUNT = 3
PULSE_TIME = 250
PULSE_STEP_COUNT = PULSE_TIME

# Define the problem.
INITIAL_STATE_0 = anp.kron(TRANSMON_G, CAVITY_ZERO)
Example #30
File: getFid.py Project: Qcrew/LabTools
    def prod(C, D):
        return np.abs(np.sum(np.conjugate(C) * D))

    return prod(A, B) / prod(A, A)
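A self-contained reading of the fragment above; the enclosing function name, its arguments, and the usage values are assumptions, and only the fragment itself comes from the source:

import numpy as np

def get_fidelity(A, B):  # hypothetical wrapper for the fragment above
    def prod(C, D):
        return np.abs(np.sum(np.conjugate(C) * D))
    return prod(A, B) / prod(A, A)

psi = np.array([1.0, 0.0])
phi = np.array([1.0, 1.0]) / np.sqrt(2)
overlap = get_fidelity(psi, phi)  # |<psi|phi>| / |<psi|psi>| = 1/sqrt(2)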