Code example #1
def BCFIM(x, p, rho, drho, M=[], eps=1e-8):
    r"""
    Calculation of the Bayesian classical Fisher information (BCFI) and the 
    Bayesian classical Fisher information matrix (BCFIM) of the form
    \begin{align}
    \mathcal{I}_{\mathrm{Bayes}}=\int p(\textbf{x})\mathcal{I}\mathrm{d}\textbf{x}
    \end{align}
    
    with $\mathcal{I}$ the CFIM and $p(\textbf{x})$ the prior distribution.

    Parameters
    ----------
    > **x:** `list`
        -- The regimes of the parameters for the integral.

    > **p:** `multidimensional array`
        -- The prior distribution.

    > **rho:** `multidimensional list`
        -- Parameterized density matrix.

    > **drho:** `multidimensional list`
        -- Derivatives of the parameterized density matrix (rho) with respect to the unknown
        parameters to be estimated.

    > **M:** `list of matrices`
        -- A set of positive operator-valued measure (POVM). The default measurement 
        is a set of rank-one symmetric informationally complete POVM (SIC-POVM).

    > **eps:** `float`
        -- Machine epsilon.

    Returns
    ----------
    **BCFI or BCFIM:** `float or matrix`
        -- For single parameter estimation (the length of x is equal to one), the output 
        is BCFI and for multiparameter estimation (the length of x is more than one), 
        it returns BCFIM.

    **Note:** 
        SIC-POVM is calculated by the Weyl-Heisenberg covariant SIC-POVM fiducial state 
        which can be downloaded from [here](http://www.physics.umb.edu/Research/QBism/solutions.html).
    """

    para_num = len(x)
    if para_num == 1:
        #### single parameter scenario ####
        if M == []:
            M = SIC(len(rho[0]))
        else:
            if type(M) != list:
                raise TypeError("Please make sure M is a list!")

        p_num = len(p)
        if type(drho[0]) == list:
            drho = [drho[i][0] for i in range(p_num)]
        F_tp = np.zeros(p_num)
        for m in range(p_num):
            F_tp[m] = CFIM(rho[m], [drho[m]], M=M, eps=eps)

        arr = [p[i] * F_tp[i] for i in range(p_num)]
        return simps(arr, x[0])
    else:
        #### multiparameter scenario ####
        p_shape = np.shape(p)
        p_ext = extract_ele(p, para_num)
        rho_ext = extract_ele(rho, para_num)
        drho_ext = extract_ele(drho, para_num)

        p_list, rho_list, drho_list = [], [], []
        for p_ele, rho_ele, drho_ele in zip(p_ext, rho_ext, drho_ext):
            p_list.append(p_ele)
            rho_list.append(rho_ele)
            drho_list.append(drho_ele)

        dim = len(rho_list[0])
        if M == []:
            M = SIC(dim)
        else:
            if type(M) != list:
                raise TypeError("Please make sure M is a list!")

        F_list = [[[0.0 for i in range(len(p_list))] for j in range(para_num)]
                  for k in range(para_num)]
        for i in range(len(p_list)):
            F_tp = CFIM(rho_list[i], drho_list[i], M=M, eps=eps)
            for pj in range(para_num):
                for pk in range(para_num):
                    F_list[pj][pk][i] = F_tp[pj][pk]

        BCFIM_res = np.zeros([para_num, para_num])
        for para_i in range(0, para_num):
            for para_j in range(para_i, para_num):
                F_ij = np.array(F_list[para_i][para_j]).reshape(p_shape)
                arr = p * F_ij
                for si in reversed(range(para_num)):
                    arr = simps(arr, x[si])
                BCFIM_res[para_i][para_j] = arr
                BCFIM_res[para_j][para_i] = arr
        return BCFIM_res
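A minimal usage sketch for the single-parameter case follows. The qubit phase model, the grid `xspan`, and the flat `prior` are illustrative assumptions rather than part of the source; since M is omitted, the default SIC-POVM described in the docstring is used.

# Illustrative call of BCFIM (hypothetical single-parameter qubit phase model).
import numpy as np

xspan = np.linspace(0.1, np.pi - 0.1, 100)               # regime of the parameter
prior = np.ones(len(xspan)) / (xspan[-1] - xspan[0])      # normalized flat prior
rho = [np.array([[0.5, 0.5 * np.exp(-1j * xi)],
                 [0.5 * np.exp(1j * xi), 0.5]]) for xi in xspan]
drho = [[np.array([[0.0, -0.5j * np.exp(-1j * xi)],
                   [0.5j * np.exp(1j * xi), 0.0]])] for xi in xspan]
I_Bayes = BCFIM([xspan], prior, rho, drho)                # returns the BCFI (a float)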
Code example #2
def QVTB(x, p, dp, rho, drho, LDtype="SLD", eps=1e-8):
    r"""
    Calculation of the Bayesian version of quantum Cramer-Rao bound introduced 
    by Van Trees (QVTB). The covariance matrix with a prior distribution $p(\textbf{x})$ 
    is defined as
    \begin{align}
    \mathrm{cov}(\hat{\textbf{x}},\{\Pi_y\})=\int p(\textbf{x})\sum_y\mathrm{Tr}
    (\rho\Pi_y)(\hat{\textbf{x}}-\textbf{x})(\hat{\textbf{x}}-\textbf{x})^{\mathrm{T}}
    \mathrm{d}\textbf{x}
    \end{align}

    where $\textbf{x}=(x_0,x_1,\dots)^{\mathrm{T}}$ are the unknown parameters to be estimated
    and the integral $\int\mathrm{d}\textbf{x}:=\iiint\mathrm{d}x_0\mathrm{d}x_1\cdots$.
    $\{\Pi_y\}$ is a set of positive operator-valued measure (POVM) and $\rho$ represents 
    the parameterized density matrix.

    \begin{align}
    \mathrm{cov}(\hat{\textbf{x}},\{\Pi_y\})\geq \left(\mathcal{I}_{\mathrm{prior}}
    +\mathcal{F}_{\mathrm{Bayes}}\right)^{-1},
    \end{align}

    where $\mathcal{I}_{\mathrm{prior}}=\int p(\textbf{x})\mathcal{I}_{p}\mathrm{d}\textbf{x}$ is 
    the CFIM for $p(\textbf{x})$ and $\mathcal{F}_{\mathrm{Bayes}}=\int p(\textbf{x})\mathcal{F}
    \mathrm{d}\textbf{x}$ is the average QFIM of all types.

    Parameters
    ----------
    > **x:** `list`
        -- The regimes of the parameters for the integral.

    > **p:** `multidimensional array`
        -- The prior distribution.

    > **dp:** `list`
        -- Derivatives of the prior distribution with respect to the unknown parameters to be 
        estimated. For example, dp[0] is the derivative vector with respect to the first 
        parameter.

    > **rho:** `multidimensional list`
        -- Parameterized density matrix.

    > **drho:** `multidimensional list`
        -- Derivatives of the parameterized density matrix (rho) with respect to the unknown
        parameters to be estimated.

    > **LDtype:** `string`
        -- The type of QFI (QFIM) to be calculated. Options are:  
        "SLD" (default) -- QFI (QFIM) based on symmetric logarithmic derivative (SLD).  
        "RLD" -- QFI (QFIM) based on right logarithmic derivative (RLD).  
        "LLD" -- QFI (QFIM) based on left logarithmic derivative (LLD).

    > **eps:** `float`
        -- Machine epsilon.

    Returns
    ----------
    **QVTB:** `float or matrix`
        -- For single parameter estimation (the length of x is equal to one), the 
        output is a float and for multiparameter estimation (the length of x is 
        more than one), it returns a matrix.
    """
    para_num = len(x)
    p_num = len(p)

    if para_num == 1:
        if type(drho[0]) == list:
            drho = [drho[i][0] for i in range(p_num)]
        if type(dp[0]) == list or type(dp[0]) == np.ndarray:
            dp = [dp[i][0] for i in range(p_num)]

        F_tp = np.zeros(p_num)
        for m in range(p_num):
            F_tp[m] = QFIM(rho[m], [drho[m]], LDtype=LDtype, eps=eps)

        arr1 = [np.real(dp[i] * dp[i] / p[i]) for i in range(p_num)]
        I = simps(arr1, x[0])
        arr2 = [np.real(F_tp[j] * p[j]) for j in range(p_num)]
        F = simps(arr2, x[0])
        return 1.0 / (I + F)
    else:
        #### multiparameter scenario ####
        p_shape = np.shape(p)
        p_ext = extract_ele(p, para_num)
        dp_ext = extract_ele(dp, para_num)
        rho_ext = extract_ele(rho, para_num)
        drho_ext = extract_ele(drho, para_num)

        p_list, dp_list, rho_list, drho_list = [], [], [], []
        for p_ele, dp_ele, rho_ele, drho_ele in zip(p_ext, dp_ext, rho_ext,
                                                    drho_ext):
            p_list.append(p_ele)
            dp_list.append(dp_ele)
            rho_list.append(rho_ele)
            drho_list.append(drho_ele)

        F_list = [[[0.0 for i in range(len(p_list))] for j in range(para_num)]
                  for k in range(para_num)]
        I_list = [[[0.0 for i in range(len(p_list))] for j in range(para_num)]
                  for k in range(para_num)]
        for i in range(len(p_list)):
            F_tp = QFIM(rho_list[i], drho_list[i], LDtype=LDtype, eps=eps)
            for pj in range(para_num):
                for pk in range(para_num):
                    F_list[pj][pk][i] = F_tp[pj][pk]
                    I_list[pj][pk][i] = (dp_list[i][pj] * dp_list[i][pk] /
                                         p_list[i]**2)

        F_res = np.zeros([para_num, para_num])
        I_res = np.zeros([para_num, para_num])
        for para_i in range(0, para_num):
            for para_j in range(para_i, para_num):
                F_ij = np.array(F_list[para_i][para_j]).reshape(p_shape)
                I_ij = np.array(I_list[para_i][para_j]).reshape(p_shape)
                arr1 = p * F_ij
                arr2 = p * I_ij
                for si in reversed(range(para_num)):
                    arr1 = simps(arr1, x[si])
                    arr2 = simps(arr2, x[si])
                F_res[para_i][para_j] = arr1
                F_res[para_j][para_i] = arr1
                I_res[para_i][para_j] = arr2
                I_res[para_j][para_i] = arr2
        return np.linalg.pinv(F_res + I_res)
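A single-parameter sketch, reusing the hypothetical `xspan`, `rho`, and `drho` from the BCFIM sketch above; the Gaussian prior `prior_g` and its derivative `dprior_g` are likewise illustrative assumptions.

# Illustrative call of QVTB (same hypothetical model as in the BCFIM sketch).
import numpy as np

mu, sigma = np.pi / 2, 0.5
prior_g = np.exp(-(xspan - mu) ** 2 / (2 * sigma ** 2)) / (np.sqrt(2 * np.pi) * sigma)
dprior_g = [-(xi - mu) / sigma ** 2 * pv for xi, pv in zip(xspan, prior_g)]   # dp/dx on the grid
bound = QVTB([xspan], prior_g, dprior_g, rho, drho)       # scalar lower bound 1/(I + F)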
Code example #3
def BQCRB(x, p, dp, rho, drho, b=[], db=[], btype=1, LDtype="SLD", eps=1e-8):
    r"""
    Calculation of the Bayesian quantum Cramer-Rao bound (BQCRB). The covariance matrix 
    with a prior distribution $p(\textbf{x})$ is defined as
    \begin{align}
    \mathrm{cov}(\hat{\textbf{x}},\{\Pi_y\})=\int p(\textbf{x})\sum_y\mathrm{Tr}
    (\rho\Pi_y)(\hat{\textbf{x}}-\textbf{x})(\hat{\textbf{x}}-\textbf{x})^{\mathrm{T}}
    \mathrm{d}\textbf{x}
    \end{align}

    where $\textbf{x}=(x_0,x_1,\dots)^{\mathrm{T}}$ are the unknown parameters to be estimated
    and the integral $\int\mathrm{d}\textbf{x}:=\iiint\mathrm{d}x_0\mathrm{d}x_1\cdots$.
    $\{\Pi_y\}$ is a set of positive operator-valued measure (POVM) and $\rho$ represents 
    the parameterized density matrix.

    This function calculates three types of the BQCRB. The first one is
    \begin{align}
    \mathrm{cov}(\hat{\textbf{x}},\{\Pi_y\})\geq\int p(\textbf{x})\left(B\mathcal{F}^{-1}B
    +\textbf{b}\textbf{b}^{\mathrm{T}}\right)\mathrm{d}\textbf{x},
    \end{align}
        
    where $\textbf{b}$ and $\textbf{b}'$ are the vectors of biases and their derivatives with 
    respect to the parameters. $B$ is a diagonal matrix with the $i$th entry 
    $B_{ii}=1+[\textbf{b}']_{i}$ and $\mathcal{F}$ is the QFIM of all types.

    The second one is
    \begin{align}
    \mathrm{cov}(\hat{\textbf{x}},\{\Pi_y\})\geq \mathcal{B}\,\mathcal{F}_{\mathrm{Bayes}}^{-1}\,
    \mathcal{B}+\int p(\textbf{x})\textbf{b}\textbf{b}^{\mathrm{T}}\mathrm{d}\textbf{x},
    \end{align}

    where $\mathcal{B}=\int p(\textbf{x})B\mathrm{d}\textbf{x}$ is the average of $B$ and 
    $\mathcal{F}_{\mathrm{Bayes}}=\int p(\textbf{x})\mathcal{F}\mathrm{d}\textbf{x}$ is 
    the average QFIM.

    The third one is
    \begin{align}
    \mathrm{cov}(\hat{\textbf{x}},\{\Pi_y\})\geq \int p(\textbf{x})
    \mathcal{G}\left(\mathcal{I}_p+\mathcal{F}\right)^{-1}\mathcal{G}^{\mathrm{T}}\mathrm{d}\textbf{x}
    \end{align}

    with $[\mathcal{I}_{p}]_{ab}:=[\partial_a \ln p(\textbf{x})][\partial_b \ln p(\textbf{x})]$ and
    $\mathcal{G}_{ab}:=[\partial_b\ln p(\textbf{x})][\textbf{b}]_a+B_{aa}\delta_{ab}$.

    Parameters
    ----------
    > **x:** `list`
        -- The regimes of the parameters for the integral.

    > **p:** `multidimensional array`
        -- The prior distribution.

    > **rho:** `multidimensional list`
        -- Parameterized density matrix.

    > **drho:** `multidimensional list`
        -- Derivatives of the parameterized density matrix (rho) with respect to the unknown
        parameters to be estimated.

    > **b:** `list`
        -- Vector of biases of the form $\textbf{b}=(b(x_0),b(x_1),\dots)^{\mathrm{T}}$.
        
    > **db:** `list`
        -- Derivatives of b with respect to the unknown parameters to be estimated. It should be 
        expressed as $\textbf{b}'=(\partial_0 b(x_0),\partial_1 b(x_1),\dots)^{\mathrm{T}}$.

    > **btype:** `int (1, 2, 3)`
        -- Types of the BQCRB. Options are:  
        1 (default) -- It means to calculate the first type of the BQCRB.  
        2 -- It means to calculate the second type of the BQCRB.  
        3 -- It means to calculate the third type of the BQCRB.

    > **LDtype:** `string`
        -- The type of QFI (QFIM) to be calculated. Options are:  
        "SLD" (default) -- QFI (QFIM) based on symmetric logarithmic derivative (SLD).  
        "RLD" -- QFI (QFIM) based on right logarithmic derivative (RLD).  
        "LLD" -- QFI (QFIM) based on left logarithmic derivative (LLD).

    > **eps:** `float`
        -- Machine epsilon.

    Returns
    ----------
    **BQCRB:** `float or matrix`
        -- For single parameter estimation (the length of x is equal to one), the 
        output is a float and for multiparameter estimation (the length of x is 
        more than one), it returns a matrix.
    """

    para_num = len(x)

    if para_num == 1:
        #### single parameter scenario ####
        p_num = len(p)

        if b == []:
            b = np.zeros(p_num)
            db = np.zeros(p_num)
        elif db == []:
            db = np.zeros(p_num)

        if type(drho[0]) == list:
            drho = [drho[i][0] for i in range(p_num)]
        if type(b[0]) == list or type(b[0]) == np.ndarray:
            b = b[0]
        if type(db[0]) == list or type(db[0]) == np.ndarray:
            db = db[0]

        F_tp = np.zeros(p_num)
        for m in range(p_num):
            F_tp[m] = QFIM(rho[m], [drho[m]], LDtype=LDtype, eps=eps)

        if btype == 1:
            arr = [
                p[i] * ((1 + db[i])**2 / F_tp[i] + b[i]**2)
                for i in range(p_num)
            ]
            F = simps(arr, x[0])
            return F
        elif btype == 2:
            arr2 = [p[i] * F_tp[i] for i in range(p_num)]
            F2 = simps(arr2, x[0])
            arr2 = [p[j] * (1 + db[j]) for j in range(p_num)]
            B = simps(arr2, x[0])
            arr3 = [p[k] * b[k]**2 for k in range(p_num)]
            bb = simps(arr3, x[0])
            F = B**2 / F2 + bb
            return F
        elif btype == 3:
            I_tp = [np.real(dp[i] * dp[i] / p[i]**2) for i in range(p_num)]
            arr = [
                p[j] * (dp[j] * b[j] / p[j] + (1 + db[j]))**2 /
                (I_tp[j] + F_tp[j]) for j in range(p_num)
            ]
            F = simps(arr, x[0])
            return F
        else:
            raise NameError("NameError: btype should be chosen from {1, 2, 3}.")
    else:
        #### multiparameter scenario ####
        if b == []:
            b, db = [], []
            for i in range(para_num):
                b.append(np.zeros(len(x[i])))
                db.append(np.zeros(len(x[i])))
        elif db == []:
            db = [np.zeros(len(x[i])) for i in range(para_num)]

        p_shape = np.shape(p)
        p_ext = extract_ele(p, para_num)
        dp_ext = extract_ele(dp, para_num)
        rho_ext = extract_ele(rho, para_num)
        drho_ext = extract_ele(drho, para_num)
        b_pro = product(*b)
        db_pro = product(*db)

        p_list, dp_list, rho_list, drho_list = [], [], [], []
        for p_ele, dp_ele, rho_ele, drho_ele in zip(p_ext, dp_ext, rho_ext,
                                                    drho_ext):
            p_list.append(p_ele)
            dp_list.append(dp_ele)
            rho_list.append(rho_ele)
            drho_list.append(drho_ele)

        b_list, db_list = [], []
        for b_ele, db_ele in zip(b_pro, db_pro):
            b_list.append([b_ele[i] for i in range(para_num)])
            db_list.append([db_ele[j] for j in range(para_num)])

        if btype == 1:
            F_list = [[[0.0 for i in range(len(p_list))]
                       for j in range(para_num)] for k in range(para_num)]
            for i in range(len(p_list)):
                F_tp = QFIM(rho_list[i], drho_list[i], LDtype=LDtype, eps=eps)
                F_inv = np.linalg.pinv(F_tp)
                B = np.diag([(1.0 + db_list[i][j]) for j in range(para_num)])
                term1 = np.dot(B, np.dot(F_inv, B))
                term2 = np.dot(
                    np.array(b_list[i]).reshape(para_num, 1),
                    np.array(b_list[i]).reshape(1, para_num),
                )
                for pj in range(para_num):
                    for pk in range(para_num):
                        F_list[pj][pk][i] = term1[pj][pk] + term2[pj][pk]

            res = np.zeros([para_num, para_num])
            for para_i in range(0, para_num):
                for para_j in range(para_i, para_num):
                    F_ij = np.array(F_list[para_i][para_j]).reshape(p_shape)
                    arr = p * F_ij
                    for si in reversed(range(para_num)):
                        arr = simps(arr, x[si])
                    res[para_i][para_j] = arr
                    res[para_j][para_i] = arr
            return res
        elif btype == 2:
            F_list = [[[0.0 for i in range(len(p_list))]
                       for j in range(para_num)] for k in range(para_num)]
            B_list = [[[0.0 for i in range(len(p_list))]
                       for j in range(para_num)] for k in range(para_num)]
            bb_list = [[[0.0 for i in range(len(p_list))]
                        for j in range(para_num)] for k in range(para_num)]
            for i in range(len(p_list)):
                F_tp = QFIM(rho_list[i], drho_list[i], LDtype=LDtype, eps=eps)
                B_tp = np.diag([(1.0 + db_list[i][j])
                                for j in range(para_num)])
                bb_tp = np.dot(
                    np.array(b_list[i]).reshape(para_num, 1),
                    np.array(b_list[i]).reshape(1, para_num),
                )
                for pj in range(para_num):
                    for pk in range(para_num):
                        F_list[pj][pk][i] = F_tp[pj][pk]
                        B_list[pj][pk][i] = B_tp[pj][pk]
                        bb_list[pj][pk][i] = bb_tp[pj][pk]

            F_res = np.zeros([para_num, para_num])
            for para_i in range(0, para_num):
                for para_j in range(para_i, para_num):
                    F_ij = np.array(F_list[para_i][para_j]).reshape(p_shape)
                    arr = p * F_ij
                    for si in reversed(range(para_num)):
                        arr = simps(arr, x[si])
                    F_res[para_i][para_j] = arr
                    F_res[para_j][para_i] = arr
            B_res = np.zeros([para_num, para_num])
            bb_res = np.zeros([para_num, para_num])
            for para_m in range(para_num):
                for para_n in range(para_num):
                    B_mn = np.array(B_list[para_m][para_n]).reshape(p_shape)
                    bb_mn = np.array(bb_list[para_m][para_n]).reshape(p_shape)
                    arr2 = p * B_mn
                    arr3 = p * bb_mn
                    for sj in reversed(range(para_num)):
                        arr2 = simps(arr2, x[sj])
                        arr3 = simps(arr3, x[sj])
                    B_res[para_m][para_n] = arr2
                    bb_res[para_m][para_n] = arr3
            res = np.dot(B_res, np.dot(np.linalg.pinv(F_res), B_res)) + bb_res
            return res
        elif btype == 3:
            F_list = [[[0.0 for i in range(len(p_list))]
                       for j in range(para_num)] for k in range(para_num)]
            for i in range(len(p_list)):
                F_tp = QFIM(rho_list[i], drho_list[i], LDtype=LDtype, eps=eps)
                I_tp = np.zeros((para_num, para_num))
                G_tp = np.zeros((para_num, para_num))
                for pm in range(para_num):
                    for pn in range(para_num):
                        if pm == pn:
                            G_tp[pm][pn] = dp_list[i][pn] * b_list[i][
                                pm] / p_list[i] + (1.0 + db_list[i][pm])
                        else:
                            G_tp[pm][pn] = dp_list[i][pn] * b_list[i][
                                pm] / p_list[i]
                        I_tp[pm][pn] = dp_list[i][pm] * dp_list[i][
                            pn] / p_list[i]**2

                F_tot = np.dot(G_tp, np.dot(np.linalg.pinv(F_tp + I_tp),
                                            G_tp.T))
                for pj in range(para_num):
                    for pk in range(para_num):
                        F_list[pj][pk][i] = F_tot[pj][pk]

            res = np.zeros([para_num, para_num])
            for para_i in range(0, para_num):
                for para_j in range(para_i, para_num):
                    F_ij = np.array(F_list[para_i][para_j]).reshape(p_shape)
                    arr = p * F_ij
                    for si in reversed(range(para_num)):
                        arr = simps(arr, x[si])
                    res[para_i][para_j] = arr
                    res[para_j][para_i] = arr
            return res
        else:
            raise NameError("NameError: btype should be chosen from {1, 2, 3}.")
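A sketch of the three btype options, reusing the hypothetical model and Gaussian prior from the sketches above; with b and db omitted, the bound is evaluated for the unbiased case (b = b' = 0).

# Illustrative calls of BQCRB (same hypothetical model and prior as above).
bqcrb1 = BQCRB([xspan], prior_g, dprior_g, rho, drho, btype=1)
bqcrb2 = BQCRB([xspan], prior_g, dprior_g, rho, drho, btype=2)
bqcrb3 = BQCRB([xspan], prior_g, dprior_g, rho, drho, btype=3)   # btype=3 also uses dp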
Code example #4
def VTB(x, p, dp, rho, drho, M=[], eps=1e-8):
    r"""
    Calculation of the Bayesian version of Cramer-Rao bound introduced by
    Van Trees (VTB). The covariance matrix with a prior distribution $p(\textbf{x})$ 
    is defined as
    \begin{align}
    \mathrm{cov}(\hat{\textbf{x}},\{\Pi_y\})=\int p(\textbf{x})\sum_y\mathrm{Tr}
    (\rho\Pi_y)(\hat{\textbf{x}}-\textbf{x})(\hat{\textbf{x}}-\textbf{x})^{\mathrm{T}}
    \mathrm{d}\textbf{x}
    \end{align}

    where $\textbf{x}=(x_0,x_1,\dots)^{\mathrm{T}}$ are the unknown parameters to be estimated
    and the integral $\int\mathrm{d}\textbf{x}:=\iiint\mathrm{d}x_0\mathrm{d}x_1\cdots$.
    $\{\Pi_y\}$ is a set of positive operator-valued measure (POVM) and $\rho$ represents 
    the parameterized density matrix.

    \begin{align}
    \mathrm{cov}(\hat{\textbf{x}},\{\Pi_y\})\geq \left(\mathcal{I}_{\mathrm{prior}}
    +\mathcal{I}_{\mathrm{Bayes}}\right)^{-1},
    \end{align}

    where $\mathcal{I}_{\mathrm{prior}}=\int p(\textbf{x})\mathcal{I}_{p}\mathrm{d}\textbf{x}$ 
    is the CFIM for $p(\textbf{x})$ and 
    $\mathcal{I}_{\mathrm{Bayes}}=\int p(\textbf{x})\mathcal{I}\mathrm{d}\textbf{x}$ is the 
    average CFIM.

    Parameters
    ----------
    > **x:** `list`
        -- The regimes of the parameters for the integral.

    > **p:** `multidimensional array`
        -- The prior distribution.

    > **dp:** `list`
        -- Derivatives of the prior distribution with respect to the unknown parameters 
        to be estimated. For example, dp[0] is the derivative vector with respect to the first 
        parameter.

    > **rho:** `multidimensional list`
        -- Parameterized density matrix.

    > **drho:** `multidimensional list`
        -- Derivatives of the parameterized density matrix (rho) with respect to the 
        unknown parameters to be estimated.

    > **M:** `list of matrices`
        -- A set of positive operator-valued measure (POVM). The default measurement 
        is a set of rank-one symmetric informationally complete POVM (SIC-POVM).

    > **eps:** `float`
        -- Machine epsilon.

    Returns
    ----------
    **VTB:** `float or matrix`
        -- For single parameter estimation (the length of x is equal to one), the 
        output is a float and for multiparameter estimation (the length of x is 
        more than one), it returns a matrix.

    **Note:** 
        SIC-POVM is calculated by the Weyl-Heisenberg covariant SIC-POVM fiducial state 
        which can be downloaded from [here](http://www.physics.umb.edu/Research/QBism/solutions.html).
    """

    para_num = len(x)
    p_num = len(p)

    if para_num == 1:
        #### single parameter scenario ####
        if M == []:
            M = SIC(len(rho[0]))
        else:
            if type(M) != list:
                raise TypeError("Please make sure M is a list!")

        if type(drho[0]) == list:
            drho = [drho[i][0] for i in range(p_num)]
        if type(dp[0]) == list or type(dp[0]) == np.ndarray:
            dp = [dp[i][0] for i in range(p_num)]

        F_tp = np.zeros(p_num)
        for m in range(p_num):
            F_tp[m] = CFIM(rho[m], [drho[m]], M=M, eps=eps)

        arr1 = [np.real(dp[i] * dp[i] / p[i]) for i in range(p_num)]
        I = simps(arr1, x[0])
        arr2 = [np.real(F_tp[j] * p[j]) for j in range(p_num)]
        F = simps(arr2, x[0])
        return 1.0 / (I + F)
    else:
        #### multiparameter scenario ####
        p_shape = np.shape(p)
        p_ext = extract_ele(p, para_num)
        dp_ext = extract_ele(dp, para_num)
        rho_ext = extract_ele(rho, para_num)
        drho_ext = extract_ele(drho, para_num)

        p_list, dp_list, rho_list, drho_list = [], [], [], []
        for p_ele, dp_ele, rho_ele, drho_ele in zip(p_ext, dp_ext, rho_ext,
                                                    drho_ext):
            p_list.append(p_ele)
            dp_list.append(dp_ele)
            rho_list.append(rho_ele)
            drho_list.append(drho_ele)

        dim = len(rho_list[0])
        if M == []:
            M = SIC(dim)
        else:
            if type(M) != list:
                raise TypeError("Please make sure M is a list!")

        F_list = [[[0.0 for i in range(len(p_list))] for j in range(para_num)]
                  for k in range(para_num)]
        I_list = [[[0.0 for i in range(len(p_list))] for j in range(para_num)]
                  for k in range(para_num)]
        for i in range(len(p_list)):
            F_tp = CFIM(rho_list[i], drho_list[i], M=M, eps=eps)
            for pj in range(para_num):
                for pk in range(para_num):
                    F_list[pj][pk][i] = F_tp[pj][pk]
                    I_list[pj][pk][i] = (dp_list[i][pj] * dp_list[i][pk] /
                                         p_list[i]**2)

        F_res = np.zeros([para_num, para_num])
        I_res = np.zeros([para_num, para_num])
        for para_i in range(0, para_num):
            for para_j in range(para_i, para_num):
                F_ij = np.array(F_list[para_i][para_j]).reshape(p_shape)
                I_ij = np.array(I_list[para_i][para_j]).reshape(p_shape)
                arr1 = p * F_ij
                arr2 = p * I_ij
                for si in reversed(range(para_num)):
                    arr1 = simps(arr1, x[si])
                    arr2 = simps(arr2, x[si])
                F_res[para_i][para_j] = arr1
                F_res[para_j][para_i] = arr1
                I_res[para_i][para_j] = arr2
                I_res[para_j][para_i] = arr2
        return np.linalg.pinv(F_res + I_res)
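The classical counterpart of the QVTB sketch above; the explicit projective measurement `Mp`, `Mm` is an illustrative assumption (omitting M falls back to the SIC-POVM).

# Illustrative call of VTB (same hypothetical model and prior as above).
Mp = 0.5 * np.array([[1.0, 1.0], [1.0, 1.0]])             # projector onto |+>
Mm = 0.5 * np.array([[1.0, -1.0], [-1.0, 1.0]])           # projector onto |->
vtb = VTB([xspan], prior_g, dprior_g, rho, drho, M=[Mp, Mm])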
Code example #5
def adaptive_Kraus(x, p, M, rho0, K, dK, W, max_episode, eps, savefile):
    para_num = len(x)
    dim = np.shape(rho0)[0]
    if para_num == 1:
        #### single-parameter scenario ####
        p_num = len(p)
        F = []
        for hi in range(p_num):
            rho_tp = sum([np.dot(Ki, np.dot(rho0, Ki.conj().T)) for Ki in K[hi]])
            drho_tp = [
                sum(
                    [
                        (
                            np.dot(dKi, np.dot(rho0, Ki.conj().T))
                            + np.dot(Ki, np.dot(rho0, dKi.conj().T))
                        )
                        for (Ki, dKi) in zip(K[hi], dKj)
                    ]
                )
                for dKj in dK[hi]
            ]
            F_tp = CFIM(rho_tp, drho_tp, M)
            F.append(F_tp)

        idx = np.argmax(F)
        x_opt = x[0][idx]
        print("The optimal parameter is %s" % x_opt)

        u = 0.0
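        # Adaptive loop (descriptive summary of the code below): at every episode the
        # probe rho0 is evolved through the Kraus channel at the shifted grid points
        # x + u, the experimenter enters the observed outcome, the prior is updated via
        # Bayes' rule with the likelihood Tr(rho_x M[y]), the MAP value of the posterior
        # gives the current estimate x_out, and the next offset is set to u = x_opt - x_out
        # so that the working point is pushed back toward the optimal parameter x_opt.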
        if savefile == False:
            y, xout = [], []
            for ei in range(max_episode):
                rho = [np.zeros((dim, dim), dtype=np.complex128) for i in range(p_num)]
                for hj in range(p_num):
                    x_idx = np.argmin(np.abs(x[0] - (x[0][hj] + u)))
                    rho_tp = sum([np.dot(Ki, np.dot(rho0, Ki.conj().T)) for Ki in K[x_idx]])
                    rho[hj] = rho_tp
                print("The tunable parameter is %s" % u)
                res_exp = input("Please enter the experimental result: ")
                res_exp = int(res_exp)
                pyx = np.zeros(p_num)
                for xi in range(p_num):
                    pyx[xi] = np.real(np.trace(np.dot(rho[xi], M[res_exp])))

                arr = [pyx[m] * p[m] for m in range(p_num)]
                py = simps(arr, x[0])
                p_update = pyx * p / py
                p = p_update
                p_idx = np.argmax(p)
                x_out = x[0][p_idx]
                print("The estimator is %s (%d episodes)" % (x_out, ei))
                u = x_opt - x_out

                if (ei + 1) % 50 == 0:
                    if (x_out + u) > x[0][-1] or (x_out + u) < x[0][0]:
                        raise ValueError("Please increase the regime of the parameters.")

                xout.append(x_out)
                y.append(res_exp)
            fp = open('pout.csv','a')
            fp.write('\n')
            np.savetxt(fp, np.array(p))
            fp.close()

            fx = open('xout.csv','a')
            fx.write('\n')
            np.savetxt(fx, np.array(xout))
            fx.close()

            fy = open('y.csv','a')
            fy.write('\n')
            np.savetxt(fy, np.array(y))
            fy.close()
        else:
            for ei in range(max_episode):
                rho = [np.zeros((dim, dim), dtype=np.complex128) for i in range(p_num)]
                for hj in range(p_num):
                    x_idx = np.argmin(np.abs(x[0] - (x[0][hj] + u)))
                    rho_tp = sum([np.dot(Ki, np.dot(rho0, Ki.conj().T)) for Ki in K[x_idx]])
                    rho[hj] = rho_tp
                print("The tunable parameter is %s" % u)
                res_exp = input("Please enter the experimental result: ")
                res_exp = int(res_exp)
                pyx = np.zeros(p_num)
                for xi in range(p_num):
                    pyx[xi] = np.real(np.trace(np.dot(rho[xi], M[res_exp])))

                arr = [pyx[m] * p[m] for m in range(p_num)]
                py = simps(arr, x[0])
                p_update = pyx * p / py
                p = p_update
                p_idx = np.argmax(p)
                x_out = x[0][p_idx]
                print("The estimator is %s (%d episodes)" % (x_out, ei))
                u = x_opt - x_out

                if (ei + 1) % 50 == 0:
                    if (x_out + u) > x[0][-1] or (x_out + u) < x[0][0]:
                        raise ValueError("Please increase the regime of the parameters.")

                fp = open('pout.csv','a')
                fp.write('\n')
                np.savetxt(fp, [np.array(p)])
                fp.close()

                fx = open('xout.csv','a')
                fx.write('\n')
                np.savetxt(fx, [x_out])
                fx.close()

                fy = open('y.csv','a')
                fy.write('\n')
                np.savetxt(fy, [res_exp])
                fy.close()
    else:
        #### multiparameter scenario ####
        p_shape = np.shape(p)
        x_list = []
        for x_tp in product(*x):
            x_list.append([x_tp[i] for i in range(para_num)])

        p_ext = extract_ele(p, para_num)
        K_ext = extract_ele(K, para_num)
        dK_ext = extract_ele(dK, para_num)

        p_list, K_list, dK_list = [], [], []
        for p_ele, K_ele, dK_ele in zip(p_ext, K_ext, dK_ext):
            p_list.append(p_ele)
            K_list.append(K_ele)
            dK_list.append(dK_ele)
        k_num = len(K_list[0])
        F = []
        for hi in range(len(p_list)):
            rho_tp = sum([np.dot(Ki, np.dot(rho0, Ki.conj().T)) for Ki in K_list[hi]])
            dK_reshape = [
                        [dK_list[hi][i][j] for i in range(k_num)]
                        for j in range(para_num)
                    ]
            drho_tp = [
                sum(
                    [
                        np.dot(dKi, np.dot(rho0, Ki.conj().T))
                        + np.dot(Ki, np.dot(rho0, dKi.conj().T))
                        for (Ki, dKi) in zip(K_list[hi], dKj)
                    ]
                )
                for dKj in dK_reshape
            ]
            F_tp = CFIM(rho_tp, drho_tp, M)
            if np.linalg.det(F_tp) < eps:
                F.append(eps)
            else:
                F.append(1.0 / np.trace(np.dot(W, np.linalg.inv(F_tp))))
        F = np.array(F).reshape(p_shape)
        idx = np.unravel_index(F.argmax(), F.shape)
        x_opt = [x[i][idx[i]] for i in range(para_num)]
        print("The optimal parameter is %s" % (x_opt))
        u = [0.0 for i in range(para_num)]

        if savefile == False:
            y, xout = [], []
            for ei in range(max_episode):
                rho = [
                    np.zeros((dim, dim), dtype=np.complex128) for i in range(len(p_list))
                ]
                for hj in range(len(p_list)):
                    idx_list = [
                        np.argmin(np.abs(x[i] - (x_list[hj][i] + u[i])))
                        for i in range(para_num)
                    ]
                    x_idx = int(
                        sum(
                            [
                                idx_list[i] * np.prod(np.array(p_shape[(i + 1) :]))
                                for i in range(para_num)
                            ]
                        )
                    )
                    rho[hj] = sum(
                        [np.dot(Ki, np.dot(rho0, Ki.conj().T)) for Ki in K_list[x_idx]]
                    )
                print("The tunable parameter are %s" % (u))
                res_exp = input("Please enter the experimental result: ")
                res_exp = int(res_exp)
                pyx_list = np.zeros(len(p_list))
                for xi in range(len(p_list)):
                    pyx_list[xi] = np.real(np.trace(np.dot(rho[xi], M[res_exp])))
                pyx = pyx_list.reshape(p_shape)
                arr = p * pyx
                for si in reversed(range(para_num)):
                    arr = simps(arr, x[si])
                py = arr
                p_update = p * pyx / py
                p = p_update
                p_idx = np.unravel_index(p.argmax(), p.shape)
                x_out = [x[i][p_idx[i]] for i in range(para_num)]

                print("The estimator are %s (%d episodes)" % (x_out, ei))
                u = np.array(x_opt) - np.array(x_out)

                if (ei + 1) % 50 == 0:
                    for un in range(para_num):
                        if (x_out[un] + u[un]) > x[un][-1] or (x_out[un] + u[un]) < x[un][0]:
                            raise ValueError(
                                "Please increase the regime of the parameters."
                            )
                xout.append(x_out)
                y.append(res_exp)
            fp = open('pout.csv','a')
            fp.write('\n')
            np.savetxt(fp, np.array(p))
            fp.close()

            fx = open('xout.csv','a')
            fx.write('\n')
            np.savetxt(fx, np.array(xout))
            fx.close()

            fy = open('y.csv','a')
            fy.write('\n')
            np.savetxt(fy, np.array(y))
            fy.close()
        else:
            for ei in range(max_episode):
                rho = [
                    np.zeros((dim, dim), dtype=np.complex128) for i in range(len(p_list))
                ]
                for hj in range(len(p_list)):
                    idx_list = [
                        np.argmin(np.abs(x[i] - (x_list[hj][i] + u[i])))
                        for i in range(para_num)
                    ]
                    x_idx = int(
                        sum(
                            [
                                idx_list[i] * np.prod(np.array(p_shape[(i + 1) :]))
                                for i in range(para_num)
                            ]
                        )
                    )
                    rho[hj] = sum(
                        [np.dot(Ki, np.dot(rho0, Ki.conj().T)) for Ki in K_list[x_idx]]
                    )
                print("The tunable parameter are %s" % (u))
                res_exp = input("Please enter the experimental result: ")
                res_exp = int(res_exp)
                pyx_list = np.zeros(len(p_list))
                for xi in range(len(p_list)):
                    pyx_list[xi] = np.real(np.trace(np.dot(rho[xi], M[res_exp])))
                pyx = pyx_list.reshape(p_shape)
                arr = p * pyx
                for si in reversed(range(para_num)):
                    arr = simps(arr, x[si])
                py = arr
                p_update = p * pyx / py
                p = p_update
                p_idx = np.unravel_index(p.argmax(), p.shape)
                x_out = [x[i][p_idx[i]] for i in range(para_num)]

                print("The estimator are %s (%d episodes)" % (x_out, ei))
                u = np.array(x_opt) - np.array(x_out)

                if (ei + 1) % 50 == 0:
                    for un in range(para_num):
                        if (x_out[un] + u[un]) > x[un][-1] or (x_out[un] + u[un]) < x[un][0]:
                            raise ValueError(
                                "Please increase the regime of the parameters."
                            )
                fp = open('pout.csv','a')
                fp.write('\n')
                np.savetxt(fp, [np.array(p)])
                fp.close()

                fx = open('xout.csv','a')
                fx.write('\n')
                np.savetxt(fx, [x_out])
                fx.close()

                fy = open('y.csv','a')
                fy.write('\n')
                np.savetxt(fy, [res_exp])
                fy.close()
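adaptive_Kraus has no docstring; the comment block below sketches the expected input layout for the single-parameter branch as inferred from the code above, so treat it as an assumption rather than a documented contract.

# Inferred input layout for adaptive_Kraus (single-parameter branch, illustrative):
#   x      -- [xspan], the grid of the unknown parameter,
#   p      -- prior distribution on that grid,
#   M      -- POVM list; the entered outcome y selects the element M[y],
#   rho0   -- probe state (density matrix) fed into the channel,
#   K[hi]  = [K_1(x_hi), K_2(x_hi), ...]       Kraus operators at grid point x_hi,
#   dK[hi] = [[dK_1(x_hi), dK_2(x_hi), ...]]   one inner list per unknown parameter,
#   W      -- weight matrix (only used in the multiparameter branch),
#   max_episode, savefile -- number of interactive episodes and the output mode.
# The routine is interactive: it prints the tunable offset u, reads each measured
# outcome from stdin, and writes the posterior and estimates to pout.csv/xout.csv/y.csv.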
Code example #6
def BQFIM(x, p, rho, drho, LDtype="SLD", eps=1e-8):
    r"""
    Calculation of the Bayesian quantum Fisher information (BQFI) and the 
    Bayesian quantum Fisher information matrix (BQFIM) of the form
    \begin{align}
    \mathcal{F}_{\mathrm{Bayes}}=\int p(\textbf{x})\mathcal{F}\mathrm{d}\textbf{x}
    \end{align}
    
    with $\mathcal{F}$ the QFIM of all types and $p(\textbf{x})$ the prior distribution.

    Parameters
    ----------
    > **x:** `list`
        -- The regimes of the parameters for the integral.

    > **p:** `multidimensional array`
        -- The prior distribution.

    > **rho:** `multidimensional list`
        -- Parameterized density matrix.

    > **drho:** `multidimensional list`
        -- Derivatives of the parameterized density matrix (rho) with respect to the unknown
        parameters to be estimated.

    > **LDtype:** `string`
        -- The type of QFI (QFIM) to be calculated. Options are:  
        "SLD" (default) -- QFI (QFIM) based on symmetric logarithmic derivative (SLD).  
        "RLD" -- QFI (QFIM) based on right logarithmic derivative (RLD).  
        "LLD" -- QFI (QFIM) based on left logarithmic derivative (LLD).

    > **eps:** `float`
        -- Machine epsilon.

    Returns
    ----------
    **BQFI or BQFIM:** `float or matrix`
        -- For single parameter estimation (the length of x is equal to one), the output 
        is BQFI and for multiparameter estimation (the length of x is more than one), 
        it returns BQFIM.
    """

    para_num = len(x)
    if para_num == 1:
        #### single parameter scenario ####
        p_num = len(p)
        if type(drho[0]) == list:
            drho = [drho[i][0] for i in range(p_num)]

        F_tp = np.zeros(p_num)
        for m in range(p_num):
            F_tp[m] = QFIM(rho[m], [drho[m]], LDtype=LDtype, eps=eps)
        arr = [p[i] * F_tp[i] for i in range(p_num)]
        return simps(arr, x[0])
    else:
        #### multiparameter scenario ####
        p_shape = np.shape(p)
        p_ext = extract_ele(p, para_num)
        rho_ext = extract_ele(rho, para_num)
        drho_ext = extract_ele(drho, para_num)

        p_list, rho_list, drho_list = [], [], []
        for p_ele, rho_ele, drho_ele in zip(p_ext, rho_ext, drho_ext):
            p_list.append(p_ele)
            rho_list.append(rho_ele)
            drho_list.append(drho_ele)

        F_list = [[[0.0 for i in range(len(p_list))] for j in range(para_num)]
                  for k in range(para_num)]
        for i in range(len(p_list)):
            F_tp = QFIM(rho_list[i], drho_list[i], LDtype=LDtype, eps=eps)
            for pj in range(para_num):
                for pk in range(para_num):
                    F_list[pj][pk][i] = F_tp[pj][pk]

        BQFIM_res = np.zeros([para_num, para_num])
        for para_i in range(0, para_num):
            for para_j in range(para_i, para_num):
                F_ij = np.array(F_list[para_i][para_j]).reshape(p_shape)
                arr = p * F_ij
                for si in reversed(range(para_num)):
                    arr = simps(arr, x[si])
                BQFIM_res[para_i][para_j] = arr
                BQFIM_res[para_j][para_i] = arr
        return BQFIM_res
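A one-line sketch, reusing the hypothetical model and flat prior from the BCFIM sketch above.

# Illustrative call of BQFIM (same hypothetical model and flat prior as in the BCFIM sketch).
F_Bayes = BQFIM([xspan], prior, rho, drho)                # returns the SLD-based BQFI (a float)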
Code example #7
    def Mopt(self, W=[]):
        r"""
        Measurement optimization for the optimal x.

        Parameters
        ----------
        > **W:** `matrix`
            -- Weight matrix.
        """

        if W == []:
            W = np.identity(self.para_num)

        if self.dynamic_type == "dynamics":
            if self.para_num == 1:
                F = []
                for i in range(len(self.H)):
                    dynamics = Lindblad(
                        self.tspan,
                        self.rho0,
                        self.H[i],
                        self.dH[i],
                        decay=self.decay,
                        Hc=self.Hc,
                        ctrl=self.ctrl,
                    )
                    rho_tp, drho_tp = dynamics.expm()
                    rho, drho = rho_tp[-1], drho_tp[-1]
                    F_tp = QFIM(rho, drho)
                    F.append(F_tp)
                idx = np.argmax(F)
                H_res, dH_res = self.H[idx], self.dH[idx]
            else:
                p_ext = extract_ele(self.p, self.para_num)
                H_ext = extract_ele(self.H, self.para_num)
                dH_ext = extract_ele(self.dH, self.para_num)

                p_list, H_list, dH_list = [], [], []
                for p_ele, H_ele, dH_ele in zip(p_ext, H_ext, dH_ext):
                    p_list.append(p_ele)
                    H_list.append(H_ele)
                    dH_list.append(dH_ele)

                F = []
                for i in range(len(p_list)):
                    dynamics = Lindblad(
                        self.tspan,
                        self.rho0,
                        H_list[i],
                        dH_list[i],
                        decay=self.decay,
                        Hc=self.Hc,
                        ctrl=self.ctrl,
                    )
                    rho_tp, drho_tp = dynamics.expm()
                    rho, drho = rho_tp[-1], drho_tp[-1]
                    F_tp = QFIM(rho, drho)
                    if np.linalg.det(F_tp) < self.eps:
                        F.append(self.eps)
                    else:
                        F.append(1.0 / np.trace(np.dot(W, np.linalg.inv(F_tp))))
                idx = np.argmax(F)
                H_res, dH_res = H_list[idx], dH_list[idx]
            m = MeasurementOpt(mtype="projection", minput=[], method="DE")
            m.dynamics(
                self.tspan,
                self.rho0,
                H_res,
                dH_res,
                Hc=self.Hc,
                ctrl=self.ctrl,
                decay=self.decay,
            )
            m.CFIM(W=W)
        elif self.dynamic_type == "Kraus":
            if self.para_num == 1:
                F = []
                for hi in range(len(self.K)):
                    rho_tp = sum(
                        [np.dot(Ki, np.dot(self.rho0, Ki.conj().T)) for Ki in self.K[hi]]
                    )
                    drho_tp = sum(
                        [
                            np.dot(dKi, np.dot(self.rho0, Ki.conj().T))
                            + np.dot(Ki, np.dot(self.rho0, dKi.conj().T))
                            for (Ki, dKi) in zip(self.K[hi], self.dK[hi])
                        ]
                    )
                    F_tp = QFIM(rho_tp, drho_tp)
                    F.append(F_tp)

                idx = np.argmax(F)
                K_res, dK_res = self.K[idx], self.dK[idx]
            else:
                p_shape = np.shape(self.p)

                p_ext = extract_ele(self.p, self.para_num)
                K_ext = extract_ele(self.K, self.para_num)
                dK_ext = extract_ele(self.dK, self.para_num)

                p_list, K_list, dK_list = [], [], []
                for p_ele, K_ele, dK_ele in zip(p_ext, K_ext, dK_ext):
                    p_list.append(p_ele)
                    K_list.append(K_ele)
                    dK_list.append(dK_ele)
                F = []
                for hi in range(len(p_list)):
                    rho_tp = sum(
                        [np.dot(Ki, np.dot(self.rho0, Ki.conj().T)) for Ki in K_list[hi]]
                    )
                    dK_reshape = [
                        [dK_list[hi][i][j] for i in range(self.k_num)]
                        for j in range(self.para_num)
                    ]
                    drho_tp = [
                        sum(
                            [
                                np.dot(dKi, np.dot(self.rho0, Ki.conj().T))
                                + np.dot(Ki, np.dot(self.rho0, dKi.conj().T))
                                for (Ki, dKi) in zip(K_list[hi], dKj)
                            ]
                        )
                        for dKj in dK_reshape
                    ]
                    F_tp = QFIM(rho_tp, drho_tp)
                    if np.linalg.det(F_tp) < self.eps:
                        F.append(self.eps)
                    else:
                        F.append(1.0 / np.trace(np.dot(W, np.linalg.inv(F_tp))))
                idx = np.argmax(F)
                K_res, dK_res = K_list[idx], dK_list[idx]
            m = MeasurementOpt(mtype="projection", minput=[], method="DE")
            m.Kraus(self.rho0, K_res, dK_res, decay=self.decay)
            m.CFIM(W=W)
        else:
            raise ValueError(
                "{!r} is not a valid value for type of dynamics, supported values are 'dynamics' and 'Kraus'.".format(
                    self.dynamic_type
                )
            )
Code example #8
def Bayes(x, p, rho, y, M=[], estimator="mean", savefile=False):
    """
    Bayesian estimation. The prior distribution is updated via the posterior 
    distribution obtained by Bayes' rule, and the estimated values of the parameters 
    are updated via the expectation value of the distribution or the maximum a 
    posteriori probability (MAP).

    Parameters
    ----------
    > **x:** `list`
        -- The regimes of the parameters for the integral.

    > **p:** `multidimensional array`
        -- The prior distribution.

    > **rho:** `multidimensional list`
        -- Parameterized density matrix.

    > **y:** `array`
        -- The experimental results obtained in practice.

    > **M:** `list of matrices`
        -- A set of positive operator-valued measure (POVM). The default measurement 
        is a set of rank-one symmetric informationally complete POVM (SIC-POVM).

    > **estimator:** `string`
        -- Estimators for the Bayesian estimation. Options are:  
        "mean" -- The expectation value of the distribution.  
        "MAP" -- Maximum a posteriori probability.

    > **savefile:** `bool`
        -- Whether or not to save all the posterior distributions.  
        If set `True`, two files "pout.npy" and "xout.npy" will be generated containing
        the posterior distributions and the estimated values in all iterations. If set 
        `False`, the posterior distribution in the final iteration and the estimated values
        in all iterations will be saved in "pout.npy" and "xout.npy". 

    Returns
    ----------
    **pout and xout:** `array and float`
        -- The posterior distribution and the estimated values in the final iteration.

    **Note:** 
        SIC-POVM is calculated by the Weyl-Heisenberg covariant SIC-POVM fiducial state 
        which can be downloaded from [here](http://www.physics.umb.edu/Research/QBism/solutions.html).
    """

    para_num = len(x)
    max_episode = len(y)
    if para_num == 1:
        #### single parameter scenario ####
        if M == []:
            M = SIC(len(rho[0]))
        else:
            if type(M) != list:
                raise TypeError("Please make sure M is a list!")
        if savefile == False:
            x_out = []
            if estimator == "mean":
                for mi in range(max_episode):
                    res_exp = int(y[mi])
                    pyx = np.zeros(len(x[0]))
                    for xi in range(len(x[0])):
                        p_tp = np.real(np.trace(np.dot(rho[xi], M[res_exp])))
                        pyx[xi] = p_tp
                    arr = [pyx[m] * p[m] for m in range(len(x[0]))]
                    py = simps(arr, x[0])
                    p_update = pyx * p / py
                    p = p_update
                    mean = simps([p[m] * x[0][m] for m in range(len(x[0]))],
                                 x[0])
                    x_out.append(mean)
            elif estimator == "MAP":
                for mi in range(max_episode):
                    res_exp = int(y[mi])
                    pyx = np.zeros(len(x[0]))
                    for xi in range(len(x[0])):
                        p_tp = np.real(np.trace(np.dot(rho[xi], M[res_exp])))
                        pyx[xi] = p_tp
                    arr = [pyx[m] * p[m] for m in range(len(x[0]))]
                    py = simps(arr, x[0])
                    p_update = pyx * p / py
                    p = p_update
                    indx = np.where(p == max(p))[0][0]
                    x_out.append(x[0][indx])
            else:
                raise ValueError(
                    "{!r} is not a valid value for estimator, supported values are 'mean' and 'MAP'."
                    .format(estimator))
            np.save("pout", p)
            np.save("xout", x_out)
            return p, x_out[-1]
        else:
            p_out, x_out = [], []
            if estimator == "mean":
                for mi in range(max_episode):
                    res_exp = int(y[mi])
                    pyx = np.zeros(len(x[0]))
                    for xi in range(len(x[0])):
                        p_tp = np.real(np.trace(np.dot(rho[xi], M[res_exp])))
                        pyx[xi] = p_tp
                    arr = [pyx[m] * p[m] for m in range(len(x[0]))]
                    py = simps(arr, x[0])
                    p_update = pyx * p / py
                    p = p_update
                    mean = simps([p[m] * x[0][m] for m in range(len(x[0]))],
                                 x[0])
                    p_out.append(p)
                    x_out.append(mean)
            elif estimator == "MAP":
                for mi in range(max_episode):
                    res_exp = int(y[mi])
                    pyx = np.zeros(len(x[0]))
                    for xi in range(len(x[0])):
                        p_tp = np.real(np.trace(np.dot(rho[xi], M[res_exp])))
                        pyx[xi] = p_tp
                    arr = [pyx[m] * p[m] for m in range(len(x[0]))]
                    py = simps(arr, x[0])
                    p_update = pyx * p / py
                    p = p_update
                    indx = np.where(p == max(p))[0][0]
                    p_out.append(p)
                    x_out.append(x[0][indx])
            else:
                raise ValueError(
                    "{!r} is not a valid value for estimator, supported values are 'mean' and 'MAP'."
                    .format(estimator))
            np.save("pout", p_out)
            np.save("xout", x_out)
            return p, x_out[-1]
    else:
        #### multiparameter scenario ####
        p_shape = np.shape(p)
        p_ext = extract_ele(p, para_num)
        rho_ext = extract_ele(rho, para_num)

        p_list, rho_list = [], []
        for p_ele, rho_ele in zip(p_ext, rho_ext):
            p_list.append(p_ele)
            rho_list.append(rho_ele)

        dim = len(rho_list[0])
        if M == []:
            M = SIC(dim)
        else:
            if type(M) != list:
                raise TypeError("Please make sure M is a list!")

        if savefile == False:
            x_out = []
            if estimator == "mean":
                for mi in range(max_episode):
                    res_exp = int(y[mi])
                    pyx_list = np.zeros(len(p_list))
                    for xi in range(len(p_list)):
                        p_tp = np.real(
                            np.trace(np.dot(rho_list[xi], M[res_exp])))
                        pyx_list[xi] = p_tp
                    pyx = pyx_list.reshape(p_shape)
                    arr = p * pyx
                    for si in reversed(range(para_num)):
                        arr = simps(arr, x[si])
                    py = arr
                    p_update = p * pyx / py
                    p = p_update

                    mean = integ(x, p)
                    x_out.append(mean)
            elif estimator == "MAP":
                for mi in range(max_episode):
                    res_exp = int(y[mi])
                    pyx_list = np.zeros(len(p_list))
                    for xi in range(len(p_list)):
                        p_tp = np.real(
                            np.trace(np.dot(rho_list[xi], M[res_exp])))
                        pyx_list[xi] = p_tp
                    pyx = pyx_list.reshape(p_shape)
                    arr = p * pyx
                    for si in reversed(range(para_num)):
                        arr = simps(arr, x[si])
                    py = arr
                    p_update = p * pyx / py
                    p = p_update

                    indx = np.where(np.array(p) == np.max(np.array(p)))
                    x_out.append([x[i][indx[i][0]] for i in range(para_num)])
            else:
                raise ValueError(
                    "{!r} is not a valid value for estimator, supported values are 'mean' and 'MAP'."
                    .format(estimator))
            np.save("Lout", p)
            np.save("xout", x_out)
            return p, x_out[-1]
        else:
            p_out, x_out = [], []
            if estimator == "mean":
                for mi in range(max_episode):
                    res_exp = int(y[mi])
                    pyx_list = np.zeros(len(p_list))
                    for xi in range(len(p_list)):
                        p_tp = np.real(
                            np.trace(np.dot(rho_list[xi], M[res_exp])))
                        pyx_list[xi] = p_tp
                    pyx = pyx_list.reshape(p_shape)
                    arr = p * pyx
                    for si in reversed(range(para_num)):
                        arr = simps(arr, x[si])
                    py = arr
                    p_update = p * pyx / py
                    p = p_update

                    mean = integ(x, p)
                    p_out.append(p)
                    x_out.append(mean)
            elif estimator == "MAP":
                for mi in range(max_episode):
                    res_exp = int(y[mi])
                    pyx_list = np.zeros(len(p_list))
                    for xi in range(len(p_list)):
                        p_tp = np.real(
                            np.trace(np.dot(rho_list[xi], M[res_exp])))
                        pyx_list[xi] = p_tp
                    pyx = pyx_list.reshape(p_shape)
                    arr = p * pyx
                    for si in reversed(range(para_num)):
                        arr = simps(arr, x[si])
                    py = arr
                    p_update = p * pyx / py
                    p = p_update

                    indx = np.where(np.array(p) == np.max(np.array(p)))
                    p_out.append(p)
                    x_out.append([x[i][indx[i][0]] for i in range(para_num)])
            else:
                raise ValueError(
                    "{!r} is not a valid value for estimator, supported values are 'mean' and 'MAP'."
                    .format(estimator))
            np.save("pout", p_out)
            np.save("xout", x_out)
            return p, x_out[-1]
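A short sketch of the single-parameter update, reusing the hypothetical model, the flat prior, and the projective measurement Mp, Mm from the earlier sketches; the outcome record y is fabricated purely for illustration.

# Illustrative call of Bayes (hypothetical model and measurement from the sketches above).
y = [0, 1, 1, 0, 1]                                       # fabricated outcome record, one entry per update
pout, xhat = Bayes([xspan], prior, rho, y, M=[Mp, Mm], estimator="MAP")
# pout is the final posterior on xspan and xhat the MAP estimate after the last update;
# the estimates from all iterations and the final posterior are also saved to xout.npy and pout.npy.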
Code example #9
def BCB(x, p, rho, W=[], eps=1e-8):
    """
    Calculation of the Bayesian cost bound with a quadratic cost function.

    Parameters
    ----------
    > **x:** `list`
        -- The regimes of the parameters for the integral.

    > **p:** `multidimensional array`
        -- The prior distribution.

    > **rho:** `multidimensional list`
        -- Parameterized density matrix.

    > **W:** `array`
        -- Weight matrix.

    > **eps:** `float`
        -- Machine epsilon.

    Returns
    ----------
    **BCB:** `float`
        -- The value of the minimum Bayesian cost.
    """
    para_num = len(x)
    if para_num == 1:
        # single-parameter scenario
        dim = len(rho[0])
        p_num = len(x[0])
        value = [p[i] * x[0][i]**2 for i in range(p_num)]
        delta2_x = simps(value, x[0])
        rho_avg = np.zeros((dim, dim), dtype=np.complex128)
        rho_pri = np.zeros((dim, dim), dtype=np.complex128)
        for di in range(dim):
            for dj in range(dim):
                rho_avg_arr = [p[m] * rho[m][di][dj] for m in range(p_num)]
                rho_pri_arr = [
                    p[n] * x[0][n] * rho[n][di][dj] for n in range(p_num)
                ]
                rho_avg[di][dj] = simps(rho_avg_arr, x[0])
                rho_pri[di][dj] = simps(rho_pri_arr, x[0])
        Lambda = Lambda_avg(rho_avg, [rho_pri], eps=eps)
        minBC = delta2_x - np.real(
            np.trace(np.dot(np.dot(rho_avg, Lambda[0]), Lambda[0])))
        return minBC
    else:
        # multi-parameter scenario
        p_shape = np.shape(p)
        p_ext = extract_ele(p, para_num)
        rho_ext = extract_ele(rho, para_num)

        p_list, rho_list = [], []
        for p_ele, rho_ele in zip(p_ext, rho_ext):
            p_list.append(p_ele)
            rho_list.append(rho_ele)

        dim = len(rho_list[0])
        p_num = len(p_list)

        x_pro = product(*x)
        x_list = []
        for x_ele in x_pro:
            x_list.append([x_ele[i] for i in range(para_num)])

        if W == []:
            W = np.identity(para_num)

        value = [0.0 for i in range(p_num)]
        for i in range(p_num):
            x_tp = np.array(x_list[i])
            xCx = np.dot(x_tp.reshape(1, -1),
                         np.dot(W, x_tp.reshape(-1, 1)))[0][0]
            value[i] = p_list[i] * xCx
        delta2_x = np.array(value).reshape(p_shape)
        for si in reversed(range(para_num)):
            delta2_x = simps(delta2_x, x[si])
        rho_avg = np.zeros((dim, dim), dtype=np.complex128)
        rho_pri = [
            np.zeros((dim, dim), dtype=np.complex128) for i in range(para_num)
        ]
        for di in range(dim):
            for dj in range(dim):
                rho_avg_arr = [
                    p_list[m] * rho_list[m][di][dj] for m in range(p_num)
                ]
                rho_avg_tp = np.array(rho_avg_arr).reshape(p_shape)
                for si in reversed(range(para_num)):
                    rho_avg_tp = simps(rho_avg_tp, x[si])
                rho_avg[di][dj] = rho_avg_tp

                for para_i in range(para_num):
                    rho_pri_arr = [
                        p_list[n] * x_list[n][para_i] * rho_list[n][di][dj]
                        for n in range(p_num)
                    ]
                    rho_pri_tp = np.array(rho_pri_arr).reshape(p_shape)
                    for si in reversed(range(para_num)):
                        rho_pri_tp = simps(rho_pri_tp, x[si])

                    rho_pri[para_i][di][dj] = rho_pri_tp
        Lambda = Lambda_avg(rho_avg, rho_pri, eps=eps)
        Mat = np.zeros((para_num, para_num), dtype=np.complex128)
        for para_m in range(para_num):
            for para_n in range(para_num):
                Mat += W[para_m][para_n] * np.dot(Lambda[para_m],
                                                  Lambda[para_n])

        minBC = delta2_x - np.real(np.trace(np.dot(rho_avg, Mat)))
        return minBC
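For reference, a minimal single-parameter usage sketch of BCB. It assumes BCB and the helpers it relies on (simps and Lambda_avg in the single-parameter branch) are already in scope, e.g. defined or imported from the surrounding module; the qubit phase model and the Gaussian prior are illustrative assumptions.

import numpy as np
from scipy.integrate import simpson

# assumed qubit phase model: rho(theta) = |+_theta><+_theta|
theta = np.linspace(-np.pi / 2, np.pi / 2, 300)
rho = [0.5 * np.array([[1.0, np.exp(-1j * t)], [np.exp(1j * t), 1.0]])
       for t in theta]

# assumed Gaussian prior, normalized on the grid
p = np.exp(-theta**2 / (2 * 0.4**2))
p /= simpson(p, x=theta)

# minimum Bayesian cost under the quadratic cost function
min_cost = BCB([theta], p, rho)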
Code Example #10
def BayesCost(x, p, xest, rho, M, W=[], eps=1e-8):
    """
    Calculation of the average Bayesian cost with a quadratic cost function.

    Parameters
    ----------
    > **x:** `list`
        -- The regimes of the parameters for the integral.

    > **p:** `multidimensional array`
        -- The prior distribution.
        
    > **xest:** `list`
        -- The estimators, i.e., `xest[mi]` is the estimated value of the parameters 
        assigned to the mi-th measurement outcome.

    > **rho:** `multidimensional list`
        -- Parameterized density matrix.

    > **M:** `list of matrices`
        -- A set of positive operator-valued measure (POVM).
    
    > **W:** `array`
        -- Weight matrix.

    > **eps:** `float`
        -- Machine epsilon.

    Returns
    ----------
    **The average Bayesian cost:** `float`
        -- The value of the average Bayesian cost with the quadratic cost function.
    """
    para_num = len(x)
    if para_num == 1:
        # single-parameter scenario
        if M == []:
            M = SIC(len(rho[0]))
        else:
            if type(M) != list:
                raise TypeError("Please make sure M is a list!")
        p_num = len(x[0])
        value = [
            p[i] * sum([
                np.trace(np.dot(rho[i], M[mi])) * (x[0][i] - xest[mi][0])**2
                for mi in range(len(M))
            ]) for i in range(p_num)
        ]
        C = simps(value, x[0])
        return np.real(C)
    else:
        # multi-parameter scenario
        p_shape = np.shape(p)
        p_ext = extract_ele(p, para_num)
        rho_ext = extract_ele(rho, para_num)

        p_list, rho_list = [], []
        for p_ele, rho_ele in zip(p_ext, rho_ext):
            p_list.append(p_ele)
            rho_list.append(rho_ele)

        x_pro = product(*x)
        x_list = []
        for x_ele in x_pro:
            x_list.append([x_ele[i] for i in range(para_num)])

        dim = len(rho_list[0])
        p_num = len(p_list)

        if W == []:
            W = np.identity(para_num)

        if M == []:
            M = SIC(dim)
        else:
            if type(M) != list:
                raise TypeError("Please make sure M is a list!")

        value = [0.0 for i in range(p_num)]
        for i in range(p_num):
            x_tp = np.array(x_list[i])
            xCx = 0.0
            for mi in range(len(M)):
                xCx += np.trace(np.dot(rho_list[i], M[mi])) * np.dot(
                    (x_tp - xest[mi]).reshape(1, -1),
                    np.dot(W, (x_tp - xest[mi]).reshape(-1, 1)))[0][0]
            value[i] = p_list[i] * xCx
        C = np.array(value).reshape(p_shape)
        for si in reversed(range(para_num)):
            C = simps(C, x[si])
        return np.real(C)
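For reference, a minimal single-parameter usage sketch of BayesCost, assuming the function and the simps/SIC helpers it uses are in scope. The qubit model, the X-basis measurement, and the per-outcome estimates in xest are illustrative assumptions; each entry of xest is a one-element list because it holds the estimate of the single parameter assigned to that outcome.

import numpy as np

# assumed qubit phase model and flat prior on [0, pi]
theta = np.linspace(0.0, np.pi, 200)
rho = [0.5 * np.array([[1.0, np.exp(-1j * t)], [np.exp(1j * t), 1.0]])
       for t in theta]
p = np.ones(len(theta)) / np.pi

# assumed X-basis projective measurement
M = [0.5 * np.array([[1.0, 1.0], [1.0, 1.0]]),
     0.5 * np.array([[1.0, -1.0], [-1.0, 1.0]])]

# one estimate per measurement outcome (illustrative values)
xest = [[np.pi / 4], [3 * np.pi / 4]]

avg_cost = BayesCost([theta], p, xest, rho, M)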
Code Example #11
def MLE(x, rho, y, M=[], savefile=False):
    """
    Maximum likelihood estimation (MLE). The estimated values of the parameters 
    are obtained by maximizing the likelihood function constructed from the 
    experimental results.

    Parameters
    ----------
    > **x:** `list`
        -- The regimes of the parameters for the integral.

    > **rho:** `multidimensional list`
        -- Parameterized density matrix.

    > **y:** `array`
        -- The experimental results obtained in practice.

    > **M:** `list of matrices`
        -- A set of positive operator-valued measure (POVM). The default measurement 
        is a set of rank-one symmetric informationally complete POVM (SIC-POVM).

    > **savefile:** `bool`
        -- Whether or not to save all the likelihood functions.  
        If set `True`, two files "Lout.npy" and "xout.npy" will be generated, containing
        the likelihood functions and the estimated values in all iterations. If set 
        `False`, the likelihood function in the final iteration and the estimated values
        in all iterations will be saved in "Lout.npy" and "xout.npy". 

    Returns
    ----------
    **Lout and xout:** `array and float`
        -- The likelihood function and the estimated values in the final iteration.

    **Note:** 
        SIC-POVM is calculated by the Weyl-Heisenberg covariant SIC-POVM fiducial state 
        which can be downloaded from [here](http://www.physics.umb.edu/Research/QBism/
        solutions.html).
    """

    para_num = len(x)
    max_episode = len(y)
    if para_num == 1:
        #### single parameter scenario ####
        if M == []:
            M = SIC(len(rho[0]))
        else:
            if type(M) != list:
                raise TypeError("Please make sure M is a list!")

        if savefile == False:
            x_out = []
            L_out = np.ones(len(x[0]))
            for mi in range(max_episode):
                res_exp = int(y[mi])
                for xi in range(len(x[0])):
                    p_tp = np.real(np.trace(np.dot(rho[xi], M[res_exp])))
                    L_out[xi] = L_out[xi] * p_tp
                indx = np.where(L_out == max(L_out))[0][0]
                x_out.append(x[0][indx])
            np.save("Lout", L_out)
            np.save("xout", x_out)

            return L_out, x_out[-1]
        else:
            L_out, x_out = [], []
            L_tp = np.ones(len(x[0]))
            for mi in range(max_episode):
                res_exp = int(y[mi])
                for xi in range(len(x[0])):
                    p_tp = np.real(np.trace(np.dot(rho[xi], M[res_exp])))
                    L_tp[xi] = L_tp[xi] * p_tp
                indx = np.where(L_tp == max(L_tp))[0][0]
                L_out.append(np.array(L_tp))  # copy, since L_tp is updated in place
                x_out.append(x[0][indx])

            np.save("Lout", L_out)
            np.save("xout", x_out)
            return L_tp, x_out[-1]
    else:
        #### multiparameter scenario ####
        p_shape = []
        for i in range(para_num):
            p_shape.append(len(x[i]))
        rho_ext = extract_ele(rho, para_num)

        rho_list = []
        for rho_ele in rho_ext:
            rho_list.append(rho_ele)

        dim = len(rho_list[0])
        if M == []:
            M = SIC(dim)
        else:
            if type(M) != list:
                raise TypeError("Please make sure M is a list!")

        if savefile == False:
            x_out = []
            L_list = np.ones(len(rho_list))
            for mi in range(max_episode):
                res_exp = int(y[mi])
                for xi in range(len(rho_list)):
                    p_tp = np.real(np.trace(np.dot(rho_list[xi], M[res_exp])))
                    L_list[xi] = L_list[xi] * p_tp
                L_out = L_list.reshape(p_shape)
                indx = np.where(L_out == np.max(L_out))
                x_out.append([x[i][indx[i][0]] for i in range(para_num)])
            np.save("Lout", L_out)
            np.save("xout", x_out)

            return L_out, x_out[-1]
        else:
            L_out, x_out = [], []
            L_list = np.ones(len(rho_list))
            for mi in range(max_episode):
                res_exp = int(y[mi])
                for xi in range(len(rho_list)):
                    p_tp = np.real(np.trace(np.dot(rho_list[xi], M[res_exp])))
                    L_list[xi] = L_list[xi] * p_tp
                L_tp = L_list.reshape(p_shape)
                indx = np.where(L_tp == np.max(L_tp))
                L_out.append(np.array(L_tp))  # copy, since reshape returns a view of L_list
                x_out.append([x[i][indx[i][0]] for i in range(para_num)])

            np.save("Lout", L_out)
            np.save("xout", x_out)
            return L_tp, x_out[-1]
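For reference, a minimal single-parameter usage sketch of MLE, assuming the function above is in scope. The measurement, the assumed true value used to simulate the record y, and the number of shots are all illustrative; note that the call also writes "Lout.npy" and "xout.npy" to the working directory.

import numpy as np

rng = np.random.default_rng(0)

# assumed qubit phase model on a grid over [0, pi]
theta = np.linspace(0.0, np.pi, 200)
rho = [0.5 * np.array([[1.0, np.exp(-1j * t)], [np.exp(1j * t), 1.0]])
       for t in theta]

# assumed X-basis projective measurement: Tr(rho(theta) M[0]) = (1 + cos(theta)) / 2
M = [0.5 * np.array([[1.0, 1.0], [1.0, 1.0]]),
     0.5 * np.array([[1.0, -1.0], [-1.0, 1.0]])]

# simulate 500 outcomes at an assumed true value theta_true = 1.0
p0 = 0.5 * (1.0 + np.cos(1.0))
y = rng.choice([0, 1], size=500, p=[p0, 1.0 - p0])

L_final, theta_hat = MLE([theta], rho, y, M=M)  # theta_hat should land near 1.0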