def grad_k(ws, fdensity, alpha, sig, psf_k):
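    # Gradient of the negative log-posterior with respect to the real-embedded,
    # Fourier-space image: l1 below is the data (likelihood) term, l2 the prior term.
    # Relies on module-level globals: n_grid, data, sig_noise, w_norm, fft, gaussian,
    # real_to_complex, complex_to_real.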
    #print('grad_k begin');
    mo = np.exp(-4.)
    ws = real_to_complex(ws)
    ws = ws.reshape((n_grid, n_grid))
    #wk = ws;
    #ws = np.real(fft.ifft2(ws));

    l1 = fft.ifft2(
        (np.real(fft.ifft2(ws * psf_k)) - data) / sig_noise**2) * psf_k
    #print(l1-l1_other)
    l1 = l1.flatten()
    ws = fft.ifft2(ws)
    xsi = (1. - fdensity) * gaussian(np.log(ws), loc=np.log(
        mo), scale=sig) / ws + fdensity * (ws**alpha / w_norm)
    l2 = -1 * gaussian(np.log(ws), loc=np.log(mo), scale=sig) * (
        1. - fdensity) / ws**2 - (1. - fdensity) * np.log(ws / mo) * np.exp(
            -np.log(ws / mo)**2 / 2 / sig**2) / np.sqrt(
                2 * np.pi) / ws**2 / sig**3 + fdensity * alpha * ws**(
                    alpha - 1) / w_norm
    l2 = l2 / np.absolute(xsi)
    l2 = fft.ifft2(l2).flatten()
    l_tot = l1 - l2
    #return l1,l2;
    l_tot = complex_to_real(np.conj(l_tot))
    #print('grad is');
    #print(l_tot);
    return l_tot
Example #2
    def loss_fun(self, params):
        """Returns total loss for given parameters.

        Args
        ----
            params (autograd.numpy.ndarray): Unflattened weights and biases.

        Returns
        -------
            float: value of the loss function
        """

        x = self.x
        bcs = self.bcs

        y_pred = _predict(params, x, self.activation)
        y_xx_pred = _predict_xx(params, x, self.activation)

        I = simps((y_pred**2).ravel(), x.ravel())
        H = - hbar**2/(2*m_e) * y_xx_pred
        E = simps((np.conj(y_pred) * H).ravel(), x.ravel()) / I
    
        return np.sum((H - E * y_pred)**2) / I \
               + (y_pred[0] - bcs[0])**2 + (y_pred[-1] - bcs[-1])**2 \
               + (y_pred[int(x.shape[0]/2)] - np.sqrt(2))**2
Example #3
File: solve2.py Project: a1k0n/303
def FilterResponse(z, x, n):
    ''' fixed filter topology:
    w, pr12, pi12, p3, p4, z1 = x

    poles 1 and 2 are complex conjugates, and 3 is on the real axis; together
    they form a 3-pole lowpass filter.

    pole 4 and zero 1 are paired up opposite the imaginary axis and slightly
    offset to the -real direction; together they form an allpass + high pass
    filter to get closer to the real machine's response

    everything is scaled in the Laplace plane by the cutoff frequency w.

    we have a parameter degeneracy w.r.t. w, so we'll fix pi12 to 1.0.
    '''

    Q, p3, p4, z1, w0, w1, wdecay = x
    w = w1 * np.exp(-np.arange(n) * wdecay) + w0
    p1 = np.exp(w * (-1.0 / Q + 1j))
    p2 = np.conj(p1)

    p3 = np.exp(w * p3 / Q)
    p4 = np.exp(w * p4 / Q)
    # p3 = np.exp(w * (p3 / Q + p4 * 1j))
    # p4 = np.conj(p3)

    # not sure whether p4/z1 should scale with cutoff.
    z1 = np.exp(w * z1)

    z = z[:, np.newaxis]

    h = (z - z1) * (z - z1) / ((z - p1) * (z - p2) * (z - p3) * (z - p4))
    return (h / h[0]).T
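
A minimal usage sketch for FilterResponse above, assuming numpy is imported as np and the function is in scope; the parameter vector x0 is purely illustrative, not a fitted value.

# evaluate the response at 64 points on the unit circle, for 16 decaying cutoff values
f = np.arange(1, 65)
z = np.exp(2j * np.pi * f / 670.0)
x0 = [5.0, -1.0, -1.5, -2.0, 0.05, 0.3, 0.02]   # illustrative Q, p3, p4, z1, w0, w1, wdecay
h = FilterResponse(z, x0, n=16)                  # shape (16, 64): one row per cutoff value
mag_db = 20 * np.log10(np.abs(h))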
def squared_L2_norm(x):
    """
    Squared L2 norm
    """
    # CAUTION: autograd handles numpy.linalg.norm incorrectly for a complex array
    x = x.ravel()
    return numpy.real(numpy.dot(numpy.conj(x.transpose()), x))
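
A quick sanity check for squared_L2_norm, assuming the module imports numpy under the name numpy as in the snippet; it matches numpy.linalg.norm(x)**2 for complex input while avoiding the autograd issue noted in the comment.

x = numpy.array([[1.0 + 2.0j, 3.0], [0.5j, -1.0]])
print(squared_L2_norm(x))            # 15.25
print(numpy.linalg.norm(x)**2)       # same value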
Example #5
 def areal(self, th, f, alpha):
     th_comp = self.real_to_complex(th)
     gfunc = Agrad.holomorphic_grad(
         lambda tt: self.loss_fn_real(tt, f, alpha))
     grad_comp = gfunc(th_comp)
     grad_real = self.complex_to_real(np.conj(grad_comp))
     return grad_real
Example #6
def neg_grad_FZ(FZ):
    # shift states
    out1 = np.dot(get_F_minus_states(FZ), _shift_right_mat) + np.dot(
        get_F_plus_states(FZ), _shift_left_mat) + get_Z_states(FZ)
    out2 = out1 + get_F0_minus(np.conj(np.roll(out1, 1, axis=0)))

    return out2
Example #7
File: solve2.py Project: a1k0n/303
 def filterr(x):
     h = FilterResponse(z, x, N)
     h = np.log(np.abs(h))
     # e = np.clip(h, 1e-2, 1e2) - targetH
     e = h - targetH
     err = weight * np.real(e * np.conj(e))
     return np.sum(err)
Example #8
def coal_var(template, data, noise):
    g_tilde = G_tilde(template, data, noise)
    g = np.fft.ifft(g_tilde)
    tc_index = np.argmax(np.multiply(g, np.conj(g)))
    tc = 0
    phic = np.arctan(g[tc_index].imag / g[tc_index].real)
    return phic, tc
Example #9
File: fca.py Project: zuoshifan/FCA
def free_kurtosis(X):
    """Compute free kurtosis k4 of a matrix X. This works for both self-adjoint and rectangular matrices."""
    N, M = X.shape
    # XXH = np.dot(X, X.T.conj())
    XXH = np.dot(X, np.conj(X.T))
    k4 = np.trace(np.dot(XXH, XXH)) / N - (1 + N / M) * (np.trace(XXH) / N)**2

    return k4
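
A minimal check of free_kurtosis, assuming numpy is imported as np and the function above is in scope; for an i.i.d. Gaussian matrix with entries of variance 1/M the free kurtosis should be close to zero for large N and M, so the exact printout is illustrative only.

np.random.seed(0)
N, M = 200, 400
X = np.random.randn(N, M) / np.sqrt(M)    # entries of variance 1/M
print(free_kurtosis(X))                    # close to 0 at this scaling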
Example #10
def grad_FZ(FZ):
    # shift the states
    out1 = np.dot(get_F_plus_states(FZ), _shift_right_mat) + np.dot(
        get_F_minus_states(FZ), _shift_left_mat) + get_Z_states(FZ)

    # fill in F0+ using a mask
    out2 = out1 + get_F0_plus(np.conj(np.roll(out1, -1, axis=0)))

    return out2
Example #11
 def grad_prior(self,wsp,ws,ws_k,xi,f,alpha):
     w_norm = (self.wlim[1]**(alpha+1) - self.wlim[0]**(alpha+1))/(alpha+1); #normalization from integrating
     param_term = 1/(1+np.exp(-1*wsp/xi)) #differentiation term due to parametrization
     #grad = fft.ifft2((-1/ws - (np.log(ws)-norm_mean)/ws/norm_sig**2)*param_term); #version with p1=0
     numerator = (1+(np.log(ws)-self.norm_mean)/self.norm_sig**2)*self.lognorm(ws)/ws + f*alpha*ws**(alpha-1)/w_norm;
     prior = self.lognorm(ws)*(1-f) + f*ws**(alpha)/w_norm; #prior w/o log
     grad = fft.ifft2(param_term*numerator/prior);
     grad_real = self.complex_to_real(np.conj(grad.flatten())); #embed to 2R
     return grad_real; #return 1d array
Example #12
File: fca.py Project: zuoshifan/FCA
def free_entropy_selfadj(X):
    """Compute free entropy chi of a self-adjoint matrix X."""
    N, M = X.shape
    assert N == M and np.allclose(X, np.conj(
        X.T)), 'X is not a self-adjoint matrix'
    e, U = la.eigh(X)
    chi = 2 * np.sum([
        np.log(np.abs(e[i] - e[j])) for i in range(N) for j in range(i + 1, N)
    ]) / (N * (N - 1))

    return chi
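
A usage sketch for free_entropy_selfadj, assuming numpy is imported as np, the function above is in scope, and la refers to a linear-algebra module providing eigh (e.g. numpy.linalg or scipy.linalg) as in the original file.

np.random.seed(1)
N = 200
G = np.random.randn(N, N)
X = (G + G.T) / np.sqrt(2 * N)    # GOE-like self-adjoint matrix with a semicircle spectrum
print(free_entropy_selfadj(X))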
Example #13
def CRZ(theta):
    r"""Two-qubit controlled rotation about the z axis.

    Args:
        theta (float): rotation angle
    Returns:
        array[complex]: diagonal part of the 4x4 rotation matrix
        :math:`|0\rangle\langle 0|\otimes \mathbb{I}+|1\rangle\langle 1|\otimes R_z(\theta)`
    """
    p = np.exp(-0.5j * theta)
    return np.array([1.0, 1.0, p, np.conj(p)])
Example #14
def RZ(theta):
    r"""One-qubit rotation about the z axis.

    Args:
        theta (float): rotation angle

    Returns:
        array[complex]: the diagonal part of the rotation matrix :math:`e^{-i \sigma_z \theta/2}`
    """
    p = np.exp(-0.5j * theta)
    return np.array([p, np.conj(p)])
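
A small check of the diagonal convention used by RZ and CRZ above, assuming numpy is imported as np: the CRZ diagonal acts as the identity on the |0> control block and as RZ on the |1> block.

theta = 0.3
print(RZ(theta))                                                          # [exp(-i*theta/2), exp(+i*theta/2)]
print(np.allclose(CRZ(theta), np.concatenate([np.ones(2), RZ(theta)])))   # True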
Example #15
File: layers.py Project: maka89/FIR
    def get_b(self):
        n = self.xlen - 1 - 2 * self.n_freqs
        Fb1 = self.Fbr + 1j * self.Fbi

        ll = [
            self.Fbc.reshape(self.n_ex, 1, self.n_out) + 0j, Fb1,
            np.zeros((self.n_ex, n, self.n_out)) + 0j,
            np.conj(Fb1[::-1])
        ]
        FW = np.concatenate(ll, axis=1)
        return np.real(np.fft.ifft(FW, axis=1))
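
The get_b method above (and the get_W variants later in this listing) assemble a full FFT spectrum from a DC term, the positive-frequency coefficients, zero padding, and a conjugate-reversed copy so that the inverse FFT comes out real. A standalone one-dimensional sketch of that symmetry, assuming numpy is imported as np, with the reversal taken explicitly along the frequency axis:

n_freqs, n_pad = 3, 4
np.random.seed(2)
F1 = np.random.randn(n_freqs) + 1j * np.random.randn(n_freqs)   # positive-frequency coefficients
dc = np.array([1.0 + 0j])                                        # real DC term
spec = np.concatenate([dc, F1, np.zeros(n_pad) + 0j, np.conj(F1[::-1])])
signal = np.fft.ifft(spec)
print(np.max(np.abs(signal.imag)))                               # ~0: the spectrum is Hermitian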
Example #16
File: solve2.py Project: a1k0n/303
def PlotPoles(x, n):
    Q, p3, p4, z1, w0, w1, wdecay = x
    w = w1 * np.exp(-np.arange(n) * wdecay) + w0
    p1 = (w * (-1.0 / Q + 1j))
    p2 = np.conj(p1)
    p3 = (w * p3 / Q)
    p4 = (w * p4)
    z1 = (w * z1)

    a = np.vstack((p1, p2, p3, p4, z1))
    plt.axvline(0)
    plt.plot(np.real(a), np.imag(a), 'x')
Example #17
File: layers.py Project: maka89/FIR
    def get_W(self):

        n = self.xlen - 1 - 2 * self.n_freqs
        FW1 = self.FWr + 1j * self.FWi

        ll = [
            self.FWc.reshape(self.n_ex, 1, self.n_inp, self.n_out) + 0j, FW1,
            np.zeros((self.n_ex, n, self.n_inp, self.n_out)) + 0j,
            np.conj(FW1[::-1])
        ]
        FW = np.concatenate(ll, axis=1)
        return np.real(np.fft.ifft(FW, axis=1))
Example #18
def filterr(x):
    f = np.arange(1, 64)
    w = 2 * np.pi * f / 670.0
    z = np.exp(1j * w)
    weight = np.log(f + 1) - np.log(f)
    # this is also a dc-blocking filter
    h = FilterResponse(z, x)
    h = h / h[0]
    #e = np.log(np.clip(h, 1e-2, 1e2)) - np.log(np.clip(targetH[f], 1e-2, 1e2))
    e = np.clip(h, 1e-2, 1e2) - np.clip(targetH[f], 1e-2, 1e2)
    err = weight * np.real(e * np.conj(e))
    # err = weight * (np.abs(h) - np.abs(targetH[f]))**2
    return np.sum(err)
Example #19
File: solve2.py Project: a1k0n/303
def FilterCoeffs(x, n):
    Q, p3, p4, z1, w0, w1, wdecay = x
    w = w1 * np.exp(-np.arange(n) * wdecay) + w0
    p1 = np.exp(w * (-1.0 / Q + 1j))
    p2 = np.conj(p1)
    p3 = np.exp(w * p3 / Q)
    p4 = np.exp(w * p4)
    z1 = np.exp(w * z1)

    gain = np.ones(n)  # FIXME

    return np.vstack((gain, -z1, np.real(p1 + p2), -np.real(p1 * p2), -z1,
                      np.real(p3 + p4), -np.real(p3 * p4))).T
Example #20
def createDws(w, s, dL, shape, bloch_x=0.0, bloch_y=0.0):
    """ creates the derivative matrices
            NOTE: python uses C ordering rather than Fortran ordering. Therefore the
            derivative operators are constructed slightly differently than in MATLAB
    """

    Nx, Ny = shape

    if w == 'x':
        if Nx > 1:
            phasor_x = np.exp(1j * bloch_x)
            if s == 'f':
                # dxf = sp.diags([-1, 1, 1], [0, 1, -Nx+1], shape=(Nx, Nx))
                dxf = sp.diags([-1, 1, phasor_x], [0, 1, -Nx + 1],
                               shape=(Nx, Nx),
                               dtype=np.complex128)
                Dws = 1 / dL * sp.kron(dxf, sp.eye(Ny))
            else:
                # dxb = sp.diags([1, -1, -1], [0, -1, Nx-1], shape=(Nx, Nx))
                dxb = sp.diags([1, -1, -np.conj(phasor_x)], [0, -1, Nx - 1],
                               shape=(Nx, Nx),
                               dtype=np.complex128)
                Dws = 1 / dL * sp.kron(dxb, sp.eye(Ny))
        else:
            Dws = sp.eye(Ny)
    if w == 'y':
        if Ny > 1:
            phasor_y = np.exp(1j * bloch_y)
            if s == 'f':
                dyf = sp.diags([-1, 1, phasor_y], [0, 1, -Ny + 1],
                               shape=(Ny, Ny))
                Dws = 1 / dL * sp.kron(sp.eye(Nx), dyf)
            else:
                dyb = sp.diags([1, -1, -np.conj(phasor_y)], [0, -1, Ny - 1],
                               shape=(Ny, Ny))
                Dws = 1 / dL * sp.kron(sp.eye(Nx), dyb)
        else:
            Dws = sp.eye(Nx)
    return Dws
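
A minimal usage sketch for createDws, assuming numpy as np and scipy.sparse as sp are imported as in the original module; the grid size and step dL below are illustrative.

Nx, Ny, dL = 4, 5, 0.1
Dxf = createDws('x', 'f', dL, (Nx, Ny))     # forward difference along x
xx = np.repeat(np.arange(Nx), Ny) * dL      # field linear in x, flattened in C order
dfdx = Dxf.dot(xx)
print(dfdx[:-Ny])                           # ~1 everywhere away from the periodic wrap-around row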
Example #21
File: layers.py Project: maka89/FIR
    def get_W(self):

        n = self.fir_length - 1 - 2 * self.n_freqs
        FW1 = self.FWr + 1j * self.FWi

        ll = [
            self.FWc.reshape(1, self.n_inp, self.n_out) + 0j, FW1,
            np.zeros((n, self.n_inp, self.n_out)) + 0j,
            np.conj(FW1[::-1])
        ]
        FW = np.concatenate(ll, axis=0)
        #print(FW.dtype)
        return np.real(np.fft.ifft(FW, axis=0))
Example #22
 def vjp(g):
     ge, gu = g
     ge = _matrix_diag(ge)
     f = 1/(e[..., anp.newaxis, :] - e[..., :, anp.newaxis] + 1.e-20)
     f -= _diag(f)
     ut = anp.swapaxes(u, -1, -2)
     r1 = f * _dot(ut, gu)
     r2 = -f * (_dot(_dot(ut, anp.conj(u)), anp.real(_dot(ut, gu)) * anp.eye(n)))
     r = _dot(_dot(anp.linalg.inv(ut), ge + r1 + r2), ut)
     if not anp.iscomplexobj(x):
         r = anp.real(r)
         # the derivative is still complex for real input (imaginary delta is allowed), real output
         # but the derivative should be real in real input case when imaginary delta is forbidden
     return r
Example #23
def QuadFitCurvatureMap(x):
    curv = []

    for i in range(len(x)):
        # do a look-ahead quadratic fit, just like the car would do
        pts = x[(np.arange(6) + i) % len(x)] / 100  # convert to meters
        basis = (pts[1] - pts[0]) / np.abs(pts[1] - pts[0])

        # project onto forward direction
        pts = (np.conj(basis) * (pts - pts[0]))

        p = np.polyfit(np.real(pts), np.imag(pts), 2)
        curv.append(p[0] / 2)

    return np.float32(curv)
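
An illustrative call for QuadFitCurvatureMap, assuming numpy is imported as np and the function above is in scope; the track is a circle given as complex x+iy points in centimetres (the /100 inside the function converts to metres), so the look-ahead curvature estimate is roughly constant along the loop.

theta = np.linspace(0, 2 * np.pi, 100, endpoint=False)
track = 2000.0 * np.exp(1j * theta)    # circular track of radius 20 m, coordinates in cm
curv = QuadFitCurvatureMap(track)
print(curv[:3])                        # roughly constant, as expected for a circle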
Example #25
def forward_grad_np_var(g, ans, gvs, vs, x, axis=None, ddof=0, keepdims=False):
    if axis is None:
        if gvs.iscomplex:
            num_reps = gvs.size / 2
        else:
            num_reps = gvs.size
    elif isinstance(axis, int):
        num_reps = gvs.shape[axis]
    elif isinstance(axis, tuple):
        num_reps = anp.prod(anp.array(gvs.shape)[list(axis)])

    x_minus_mean = anp.conj(x - anp.mean(x, axis=axis, keepdims=True))
    return (2.0 *
            anp.sum(anp.real(g * x_minus_mean), axis=axis, keepdims=keepdims) /
            (num_reps - ddof))
Example #26
 def grad_like(self,wsp,ws,ws_k,xi):
     #print('start grad_like')
     conv = np.real(fft.ifft2(ws_k*self.psf_k)); #convolution of ws with psf
     term1 = (conv - self.data)/self.n_grid**2 /self.sig_noise**2  # term that's squared in the likelihood (with N in the denominator)
     grad = np.zeros((self.n_grid,self.n_grid),dtype='complex')
     for i in range(0,self.n_grid):
         for j in range(0,self.n_grid):        
             #try to modulate by hand
             ft1 = fft.fft2(1/(1+np.exp(-1*wsp/xi)));
             ftp = np.roll(ft1,(i,j),axis=(0,1));
             term2 = fft.ifft2(ftp*self.psf_k);
             grad[i,j] = np.sum(term1*term2);
     grad_real = self.complex_to_real(np.conj(grad.flatten())); #embed to 2R
     #print('end grad_like');
     return grad_real; #return 1d array
Example #27
def best_invec_phase(x, y, **kwargs):
    """Computes the l2-distance between `x` and `y` up to a global phasefactor.

    .. math::

        \min_\phi \Vert x - \mathrm{e}^{i \phi} y \Vert_2

    :param x, y: Input vectors of same length
    :param kwargs: Parameters passed to `scipy.optimize.minimize`
    :returns: Minimal distance (and possibly optimal vector `y`)

    """
    norm_sq = lambda x: np.real(np.dot(np.conj(x.ravel()), x.ravel()))
    cost = lambda phi: norm_sq(x - np.exp(1.j * phi) * y)
    # Choose initialization randomly to evade the maximum at the opposite side
    result = minimize(cost, rand_angles(), jac=grad(cost), **kwargs)
    y_ = np.exp(1.j * result['x']) * y
    return y_, result['fun']
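
A usage sketch for best_invec_phase, assuming np is autograd.numpy and grad, minimize (scipy.optimize) and a rand_angles helper are available as in the original module; y is just x with an artificial global phase, so the reported distance should come out near zero.

x = np.array([1.0 + 2.0j, -0.5 + 0.3j, 2.0 - 1.0j, 0.7j])
y = np.exp(0.7j) * x                       # same vector up to a global phase
y_aligned, dist = best_invec_phase(x, y)
print(dist)                                # ~0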
Example #28
        def cost_fn(x, return_fidelity=False):
            new_x = []
            pre = 0
            post = 0
            for i, system in enumerate(system_list):
                if systems_to_optimize[i]:
                    post = post + system.parameters["num_phases"]
                    new_x.append(x[pre:post])
                    pre = system.parameters["num_phases"]
            x = new_x

            for i, idx in enumerate(idxs):
                S_list[idx] = builds_list[idx](x[i])
                SH_list[idx] = np.conj(S_list[idx].T)

            cost = 0
            min_fid = 1
            for i, single_rho in enumerate(encoding.rhos_inputs):
                rho = np.copy(single_rho)

                for j, system in enumerate(system_list):
                    if len(system_list) > 1:

                        if system.system_type == "decoder" and j > 0:
                            pre_sys = system_list[j - 1]
                            rho_ta = pre_sys.encoding.lossless_to_targets(rho)
                            rho = system.apply_loss(rho=rho_ta, verbose=False)

                        rho = np.dot(np.dot(S_list[j], rho), SH_list[j])
                        rho = reset_ancillae(rho, system)
                    else:
                        rho = np.dot(np.dot(S_list[j], rho), SH_list[j])

                rho = system_list[-1].encoding.lossless_to_targets(rho)

                fidelity = np.abs(np.trace(np.dot(encoding.rhos_targets[i], rho)))
                if fidelity < min_fid:
                    min_fid = fidelity
                cost = cost + (1 - fidelity * np.abs(np.trace(rho)))

            if return_fidelity:
                return min_fid
            else:
                return cost / len(encoding.rhos_inputs)
Example #29
def best_tmat_phases(A, B, cols=True, rows=True, **kwargs):
    """Finds the angles `phi` and `psi` that minimize the Frobenius distance
    between A and B', where

    .. math::

        B' = \mathrm{diag}(\mathrm{exp}(i \phi)) B \mathrm{diag}(\mathrm{exp}(i \psi))

    :returns: Optimal value `B'` as well as minimal Frobenius distance
    """
    d = len(A)
    diagp = lambda phi: np.diag(np.exp(1.j * phi))
    B_ = lambda phi, psi: np.dot(diagp(phi), np.dot(B, diagp(psi)))
    norm_sq = lambda x: np.real(np.dot(np.conj(x.ravel()), x.ravel()))

    if cols and rows:
        cost = lambda x: norm_sq(A - B_(x[:d], x[d:]))
        init_angles = rand_angles(2 * d)
    elif rows:
        cost = lambda x: norm_sq(A - B_(x, np.zeros(d)))
        init_angles = rand_angles(d)
    elif cols:
        cost = lambda x: norm_sq(A - B_(np.zeros(d), x))
        init_angles = rand_angles(d)
    else:
        raise ValueError('Either rows or cols should be true')

    result = minimize(cost, init_angles, jac=grad(cost), **kwargs)

    if cols and rows:
        phi, psi = result['x'][:d], result['x'][d:]
    elif rows:
        phi, psi = result['x'], np.zeros(d)
    elif cols:
        phi, psi = np.zeros(d), result['x']

    # Normalization 1 / np.sqrt(2) due to real embedding
    return B_(phi, psi), result['fun'] / np.sqrt(2)
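
Similarly for best_tmat_phases, under the same assumptions (np as autograd.numpy, grad, minimize, rand_angles in scope): dressing A with row and column phases and re-optimizing should drive the Frobenius distance back toward zero, up to the optimizer's tolerance and the non-convexity of the phase landscape.

A = np.array([[1.0, 2.0 + 1.0j], [0.5j, -1.0 + 0.2j]])
B = np.dot(np.diag(np.exp(-1j * np.array([0.4, 1.1]))),
           np.dot(A, np.diag(np.exp(-1j * np.array([0.2, 0.9])))))
B_opt, dist = best_tmat_phases(A, B)
print(dist)                                # ~0 when the optimizer recovers the global phases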
Example #30
File: fca.py Project: zuoshifan/FCA
def free_entropy_rectangular(X):
    """Compute free entropy chi of a rectrangular matrix X."""
    N, M = X.shape
    # XXH = np.dot(X, X.T.conj())
    XXH = np.dot(X, np.conj(X.T))
    # e, U = la.eigh(XXH)
    e, U = np.linalg.eigh(XXH)
    # U, s, VT = np.linalg.svd(X, full_matrices=False)
    # e = s[s>0.0]**2
    # e = np.unique(e[e>0]) # only unique positive ones
    e = e[e > 0.0]  # only positive ones
    # e = e[e>1.0e-8*e[-1]] # drop very small ones
    a, b = 1.0 * N / (N + M), 1.0 * M / (N + M)
    # sum_log_diff = 2 * np.sum([ np.log(np.abs(e[i] - e[j])) for i in range(N) for j in range(i+1, N) ])
    N1 = len(e)
    sum_log_diff = 2 * np.sum([
        np.log(np.abs(e[i] - e[j])) for i in range(N1)
        for j in range(i + 1, N1)
    ])
    sum_log = np.sum(np.log(e))
    chi = a**2 / (N * (N - 1)) * sum_log_diff + ((b - a) * a / N) * sum_log

    return chi
Example #31
    def g_out_antihess(y):
        lp = snp_log_probs(y)
        ret = 0.0
        for l in seg_sites._get_likelihood_sequences(lp):
            L = len(l)
            lc = make_constant(l)

            fft = np.fft.fft(l)
            # (assumes l is REAL)
            assert np.all(np.imag(l) == 0.0)
            fft_rev = np.conj(fft) * np.exp(
                2 * np.pi * 1j * np.arange(L) / float(L))

            curr = 0.5 * (fft * fft_rev - fft * make_constant(fft_rev) -
                          make_constant(fft) * fft_rev)
            curr = np.fft.ifft(curr)[(L - 1)::-1]

            # make real
            assert np.allclose(np.imag(curr / L), 0.0)
            curr = np.real(curr)
            curr = curr[0] + 2.0 * np.sum(curr[1:int(np.sqrt(L))])
            ret = ret + curr
        return ret
Example #32
 def L1(x):
     return np.sqrt(np.dot(x,np.conj(x)))
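
A quick numerical check of the function above (despite its name it computes the Euclidean l2 norm), assuming numpy is imported as np:

v = np.array([3.0 + 4.0j, 1.0 - 2.0j])
print(L1(v))                # (5.477...+0j)
print(np.linalg.norm(v))    # 5.477..., the same magnitude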