Example no. 1
def cross_entropy(predictions: np.ndarray,
                  targets: np.ndarray,
                  epsilon: float = 1e-15) -> float:
    """
    Cross entropy calculation between :py:attr:`targets` (encoded as one-hot vectors)
    and :py:attr:`predictions`. Predictions are normalized to sum up to `1.0`.

    .. note::

        The implementation of this function is based on the discussion on
        `StackOverflow <https://stackoverflow.com/a/47398312/10138546>`_.

        Due to ArrayBoxes that are required for automatic differentiation, we currently
        use this implementation instead of implementations provided by sklearn for
        example.

    :param predictions: Predictions in the same order as targets. If predictions for
        several samples are given, the cross entropy averaged over the samples is returned.
    :param targets: Ground truth labels for supplied samples.
    :param epsilon: Amount by which predictions are clipped away from `0` and `1`,
        since `log(0)` is undefined.
    """
    assert (
        predictions.shape == targets.shape
    ), f"Shape of predictions {predictions.shape} must match targets {targets.shape}"
    current_sum = np.sum(predictions, axis=predictions.ndim - 1)

    if predictions.ndim == 1:
        sample_count = 1
        predictions = predictions / current_sum
    else:
        sample_count = predictions.shape[0]
        predictions = predictions / current_sum[:, np.newaxis]

    predictions = np.clip(predictions, epsilon, 1.0 - epsilon)
    return -np.sum(targets * np.log(predictions)) / sample_count
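A minimal usage sketch for the function above, using plain NumPy and made-up one-hot targets and (already normalized) predictions:

import numpy as np

# three samples with one-hot targets and normalized predictions
targets = np.array([[1.0, 0.0, 0.0],
                    [0.0, 1.0, 0.0],
                    [0.0, 0.0, 1.0]])
predictions = np.array([[0.7, 0.2, 0.1],
                        [0.1, 0.8, 0.1],
                        [0.2, 0.2, 0.6]])

# averages -log(0.7), -log(0.8) and -log(0.6), roughly 0.36
print(cross_entropy(predictions, targets))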
Example no. 2
def shadow_bound(error, observables, failure_rate=0.01):
    """
    Calculate the shadow bound for the Pauli measurement scheme.

    Implements Eq. (S13) from https://arxiv.org/pdf/2002.08953.pdf

    Args:
        error (float): The error on the estimator.
        observables (list) : List of matrices corresponding to the observables we intend to
            measure.
        failure_rate (float): Rate of failure for the bound to hold.

    Returns:
        A tuple of two integers: the number of samples required to satisfy the shadow bound,
        and the chunk size required to attain the specified failure rate.
    """
    M = len(observables)
    K = 2 * np.log(2 * M / failure_rate)
    shadow_norm = (
        lambda op: np.linalg.norm(
            op - np.trace(op) / 2 ** int(np.log2(op.shape[0])), ord=np.inf
        )
        ** 2
    )
    N = 34 * max(shadow_norm(o) for o in observables) / error ** 2
    return int(np.ceil(N * K)), int(K)
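A small, hypothetical usage sketch for shadow_bound, using single-qubit Pauli matrices as an arbitrary choice of observables:

import numpy as np

# Pauli X and Z as explicit 2x2 matrices (illustrative observables)
pauli_x = np.array([[0.0, 1.0], [1.0, 0.0]])
pauli_z = np.array([[1.0, 0.0], [0.0, -1.0]])

shots, chunk = shadow_bound(error=0.1, observables=[pauli_x, pauli_z], failure_rate=0.01)
print(shots, chunk)  # required number of samples and chunk size K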
Example no. 3
def cross_entropy(X, y):

    m = y.shape[0]
    p = np.array([softmax(x) for x in X])
    # We use multidimensional array indexing to extract
    # softmax probability of the correct label for each sample.
    log_likelihood = -np.log(p[range(m), y])
    loss = np.sum(log_likelihood) / m
    return loss
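The snippet above assumes a softmax helper defined elsewhere; a possible definition and a hypothetical call with integer class labels:

import numpy as np

def softmax(x):
    # numerically stable softmax over a 1-d score vector (assumed helper)
    e = np.exp(x - np.max(x))
    return e / np.sum(e)

# made-up logits for three samples and the index of the correct class for each
X = np.array([[2.0, 1.0, 0.1],
              [0.5, 2.5, 0.3],
              [1.0, 0.2, 3.0]])
y = np.array([0, 1, 2])

print(cross_entropy(X, y))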
Example no. 4
def disc_cost(d_weights):
    cost = 0.0
    for j in range(MINIBATCH_SIZE):
        D_real = prob_real(
            real_disc_circuit(d_weights,
                              data=data[j][0] + [data[j][1]]))
        G_real = gen_output(
            real_gen_circuit(gen_weights, data=data[j][0]))
        D_fake = prob_real(
            real_disc_circuit(d_weights,
                              data=data[j][0] + [G_real]))
        if type(D_real) != np.float64:
            D_real = D_real._value
        if type(D_fake) != np.float64:
            D_fake = D_fake._value
        cost -= np.log(D_real) + np.log(1 - D_fake)
    cost /= MINIBATCH_SIZE
    return cost
Example no. 5
def EnsembleEntropy(self, ProbDist):
    '''
    Compute ensemble entropy
    from prob dist.
    '''
    ent = 0.0
    # E = sum of entropies since
    # ansatz is prod state
    for dist in ProbDist:
        ent += -1 * np.sum(dist * np.log(dist))
    return ent
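A standalone sketch of the same computation (the method belongs to a larger class; the two distributions below are made up):

import numpy as np

# per-qubit probability distributions of a product-state ansatz
prob_dist = [np.array([0.5, 0.5]), np.array([0.9, 0.1])]

ent = 0.0
for dist in prob_dist:
    ent += -1 * np.sum(dist * np.log(dist))
print(ent)  # log(2) plus the entropy of the (0.9, 0.1) distribution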
Example no. 6
def cross_entropy(labels, predictions):
    """ Categorical cross entropy loss function

    Args:
        labels (array[float]): 2-d array of one-hot labels, one row per sample
        predictions (array[float]): 2-d array of predicted probabilities, one row per sample

    Returns:
        float: cross entropy
    """
    loss = 0
    for label, pred in zip(labels, predictions):
        for label_, pred_ in zip(label, pred):
            loss -= label_ * np.log(pred_)
    loss = loss / len(labels)
    return loss
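A minimal usage sketch with made-up one-hot labels and normalized predictions for two samples:

import numpy as np

labels = np.array([[1.0, 0.0], [0.0, 1.0]])
predictions = np.array([[0.9, 0.1], [0.2, 0.8]])

# (-log(0.9) - log(0.8)) / 2, roughly 0.16
print(cross_entropy(labels, predictions))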
Example no. 7
np.random.seed(1)

# initialize some random vals for the periodic convolution
init = np.random.uniform(low=-1, high=1, size=(2))

c = np.zeros(M)
c[0] = init[0]
c[1] = init[1]

# get the Fourier-transformed values and separate argument and absolute value
complex_vals = np.fft.fft(c)
scale_arg = np.angle(complex_vals)
scale_abs = np.absolute(complex_vals)

# get the required squeeze scaling argument
r1 = -np.log(scale_abs)

# perform a neural-network feed-forward using the Fourier matrices, the required phase shift
# (applied with a rotation/phase gate) and the required squeeze scaling

circ_res = quantum_circ1(x=x,
                         phi_x=phi_x,
                         U1=F_H,
                         U2=F,
                         r=r1,
                         phi_r=phi_r,
                         phi_rot=scale_arg)

# theoretical transformation
mat = F @ np.diag(scale_abs * np.exp(1J * scale_arg)) @ F_H
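A self-contained check of the magnitude/phase split used above (M and the kernel values are made up): the squeeze scaling exp(-r1) recovers the FFT magnitudes, which together with the phases reconstruct the Fourier coefficients.

import numpy as np

M = 4
c = np.zeros(M)
c[0], c[1] = 0.3, -0.7

complex_vals = np.fft.fft(c)
scale_arg = np.angle(complex_vals)
scale_abs = np.absolute(complex_vals)
r1 = -np.log(scale_abs)

# magnitudes and phases reconstruct the original Fourier coefficients
assert np.allclose(np.exp(-r1) * np.exp(1j * scale_arg), complex_vals)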
Example no. 8
        ansatz = mera_circuit(num_qubits, periodic, fix_layers)
        cost_fn = qml.ExpvalCost(ansatz, H, dev)
        grad = qml.grad(cost_fn)

        num_params_per_gate = 15
        num_gates = get_num_mera_gates(num_qubits, periodic, fix_layers)
        num_params = num_params_per_gate * num_gates
        params = np.pi * (np.random.rand(num_params) - 1.0)

        gradient = np.array(grad(params)[0])
        grad_vals.append(gradient)
    variances.append(np.mean(np.var(grad_vals, axis=0)))

print(variances)
#variances = np.array(np.mean(variances, axis=1))
qubits = np.array(qubits)

# Fit the semilog plot to a straight line
p = np.polyfit(qubits, np.log(variances), 1)

# Plot the straight line fit to the semilog
plt.semilogy(qubits, variances, "o")
plt.semilogy(qubits,
             np.exp(p[0] * qubits + p[1]),
             "o-.",
             label="Slope {:3.2f}".format(p[0]))
plt.xlabel(r"N Qubits")
plt.ylabel(r"$\langle \partial \theta_{1, 1} E\rangle$ variance")
plt.legend()
plt.show()
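A synthetic sanity check of the fitting step (values are made up): gradient variances that decay exponentially with the qubit count give back the decay rate as the fitted slope.

import numpy as np

qubits = np.array([4, 6, 8, 10, 12])
variances = 0.3 * np.exp(-0.45 * qubits)

p = np.polyfit(qubits, np.log(variances), 1)
print(p[0])  # ~ -0.45, the decay rate per qubit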
Example no. 9
def classical(p):
    "Classical node, requires autograd.numpy functions."
    return anp.exp(anp.sum(quantum(p[0], anp.log(p[1]))))
Example no. 10
def second_renyi_entropy(rho):
    """Computes the second Renyi entropy of a given density matrix."""
    # DO NOT MODIFY anything in this code block
    rho_diag_2 = np.diagonal(rho)**2.0
    return -np.real(np.log(np.sum(rho_diag_2)))
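A minimal usage sketch with a hypothetical single-qubit density matrix:

import numpy as np

rho = np.diag([0.75, 0.25])

# -log(0.75**2 + 0.25**2), roughly 0.47
print(second_renyi_entropy(rho))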