Code Example #1
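The snippets in this section are SageMath code: `ceil`, `floor`, `pi`, `arcsin`, `sqrt` and `log(..., 2).n()` are Sage built-ins. The name `R` is used for real-number coercion but is never defined in these excerpts; a minimal preamble under that assumption (the choice of `RealField` precision is a guess, not from the source) would be:

from sage.all import *

# Assumed preamble: the excerpts use R for real-number coercion but do not
# define it; a Sage real field with some working precision is a plausible stand-in.
R = RealField(100)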
def instances(GD, N, MD, p):
    """
    The formula from "Optimizing the oracle under a depth limit". Assumes a single target, t = 1.

    :params GD: Grover's oracle depth
    :params N:  keyspace size
    :params MD: MAXDEPTH
    :params p:  target success probability

    Assuming p = 1
        Within depth MD we can fit j = floor(MD/GD) Grover iterations.
        Pick M such that j iterations give success probability 1 on a search space of size M.
            p(j) = sin((2j+1)theta)**2
            1 = sin((2j+1)theta)**2
            1 = sin((2j+1)theta)
            (2j+1)theta = pi/2
            theta = pi/(2(2j+1)); also theta = arcsin(sqrt(t/M)) ~ sqrt(t/M) = 1/sqrt(M) (small-angle approximation).
            sqrt(M) = 2(2j+1)/pi
            M = (2(2j+1)/pi)**2

        Hence need S = ceil(N/M) machines.
            S = ceil(N/(2(2*floor(MD/GD)+1)/pi)**2)
    Else
        Could either lower each individual computer's success probability, since the target lies inside only one computer's state.
            Then given a requested p, we have
            p = sin((2j+1)theta)**2
            arcsin(sqrt(p)) = (2j+1)theta ~ (2j+1)/sqrt(M)
            M = (2j+1)**2/arcsin(sqrt(p))**2
            S = ceil(N*(arcsine(sqrt(p))/(2j+1))**2)

        Or could run at full depth but with fewer machines.
            For a target p, one would choose ceil(p*S) machines, where S is chosen as in the p = 1 case.
        Then take whichever of the two strategies gives the lower cost.
    """

    # compute the p=1 case first
    S1 = ceil(N / (2 * (2 * floor(MD / GD) + 1) / pi)**2)

    # An alternative derivation giving the same result for p == 1 (up to a very small difference):
    # inner parallelisation gives a sqrt(S) speedup without losing success probability.
    # Setting ceil(sqrt(N) * pi/4) * GD / sqrt(S) = MD and solving for S gives
    # S1 = float(ceil(((pi*sqrt(N)/4) * GD / MD)**2))

    if p == 1:
        return S1
    else:
        Sp = ceil(N * (arcsin(sqrt(R(p))) / (2 * floor(MD / GD) + 1))**2)
        if ceil(p * S1) == Sp:
            print(
                "NOTE: for (GD, log2(N), log2(MD), p) == (%d, %.2f, %.2f, %.2f) naive reduction of parallel machines is not worse!"
                % (GD, log(N, 2).n(), log(MD, 2).n(), p))
        elif ceil(p * S1) < Sp:
            print(
                "NOTE: for (GD, log2(N), log2(MD), p) == (%d, %.2f, %.2f, %.2f) naive reduction of parallel machines is better!"
                % (GD, log(N, 2).n(), log(MD, 2).n(), p))

        res = min(Sp, ceil(p * S1))
        return res
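A usage sketch for `instances`, assuming the preamble above; all parameter values are illustrative, not taken from the source:

# Hypothetical parameters: oracle depth 2**20, 128-bit keyspace,
# MAXDEPTH 2**40, target success probability 0.5.
S = instances(2**20, 2**128, 2**40, 0.5)
print(log(S, 2).n())  # log2 of the number of parallel Grover instances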
Code Example #2
def iterations(p, N, t):
    """
    Grover succeeds with probability p(j) = sin^2((2j+1)theta).
    This inverts that relation to obtain the number of iterations j from theta and p(j).

    :params p:  Grover's success prob
    :params N:  Grover's search space size
    :params t:  number of targets. Currently assumed to be 1.
    """
    return (R(arcsin(sqrt(p)))/theta(N,t) - 1)/2
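A quick self-consistency sketch (values illustrative): `iterations` relies on `theta` from Code Example #4, and plugging its output back into p(j) = sin^2((2j+1)theta) should recover the requested probability.

# Iterations needed for success probability 0.5 on a 2**64 search space, one target.
j = iterations(0.5, 2**64, 1)
print(j.n())
print((sin((2*j + 1) * theta(2**64, 1))**2).n())  # ~0.5 by construction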
Code Example #3
def FGRooDhFkch():
    """
    phystricks figure routine: draws the graph of arcsin(x) on [-1, 1],
    with the y-axis graduated in units of pi/2. Requires the phystricks
    figure-generation package (typically loaded via `from phystricks import *`).
    """
    pspict, fig = SinglePicture("FGRooDhFkch")
    pspict.dilatation_X(3)  # stretch horizontally for readability
    pspict.dilatation_Y(1)

    x = var('x')
    f = phyFunction(arcsin(x)).graph(-1, 1)

    pspict.DrawGraphs(f)
    # graduate the y-axis in multiples of pi/2 and match the grid spacing
    pspict.axes.single_axeY.axes_unit = AxesUnit(pi / 2, '')
    pspict.grid.Dy = pi / 2

    pspict.DrawDefaultGrid()
    pspict.DrawDefaultAxes()
    #fig.no_figure()
    fig.conclude()
    fig.write_the_file()  # write the LaTeX source for the figure
Code Example #4
def theta(N, t):
    """
    Given a search space of size N with t solutions, returns the angle theta by which each Grover iteration advances.
    """
    return R(arcsin(sqrt(R(t)/N)))
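A small sanity check under the section's own definitions: for a single target, theta(N, 1) = arcsin(1/sqrt(N)), which for large N is roughly 1/sqrt(N); this is the small-angle approximation the docstring of `instances` leans on.

print(theta(2**128, 1).n())  # ~2^-64
print(R(2**-64).n())         # 1/sqrt(2**128): small-angle approximation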