import networkx as nx
import numpy as np


def tree_prune(G: nx.DiGraph, leaves_at_depth_d: dict, d: int, prune_rate: float):
    """Remove nodes from the tree based on the set prune rate and the total
    cost of the path from root to leaf.

    Args:
        G: NetworkX DiGraph object that represents our tree.
        leaves_at_depth_d: Dictionary that keeps track of all the leaves at level d.
        d: The depth that we are pruning at.
        prune_rate: The fraction of leaves to be removed.

    Returns:
        None. G and leaves_at_depth_d are modified in place.
    """
    cost_at_leaf = []
    # Loop over the leaves at depth d.
    for leaf in leaves_at_depth_d[d - 1]:
        cost_at_leaf.append(tree_cost_of_path(G, leaf))
    # Sort the leaves by cost, ascending.
    leaves_sorted = [
        x for _, x in sorted(zip(cost_at_leaf, leaves_at_depth_d[d - 1]))
    ]
    n_pruned = int(np.ceil(prune_rate * len(cost_at_leaf)))
    leaves_kept = leaves_sorted[n_pruned:]
    leaves_removed = leaves_sorted[:n_pruned]
    G.remove_nodes_from(leaves_removed)
    leaves_at_depth_d[d - 1] = leaves_kept
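# A minimal usage sketch for tree_prune. tree_cost_of_path is defined elsewhere
# in the original code; the stand-in below (hypothetical) sums a 'cost' edge
# attribute along the root-to-leaf path.
def tree_cost_of_path(G, leaf, root=0):
    # Hypothetical stand-in: total edge cost from the root to this leaf.
    path = nx.shortest_path(G, source=root, target=leaf)
    return sum(G[u][v].get("cost", 1.0) for u, v in zip(path, path[1:]))

G = nx.DiGraph()
G.add_edge(0, 1, cost=0.2)
G.add_edge(0, 2, cost=0.9)
leaves_at_depth_d = {0: [1, 2]}
tree_prune(G, leaves_at_depth_d, d=1, prune_rate=0.5)
print(leaves_at_depth_d[0])  # [2]: the lowest-cost leaf (node 1) was pruned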
def config(X):
    n = int(np.ceil(np.log2(len(X[0]))))  # pylint: disable=no-member
    n = 2**int(np.ceil(np.log2(n)))  # n must be a power of 2.  # pylint: disable=no-member
    N = n  # total number of qubits in the circuit.
    w = 2*n - 1  # number of circuit parameters (weights).
    # Zero-pad the data: the number of qubits needed to encode the data
    # (log_2(N)) must be a power of 2.
    X = np.c_[X, np.zeros((len(X), 2**n - len(X[0])))]  # pylint: disable=no-member
    return n, N, w, X
def config(X):
    n = int(np.ceil(np.log2(len(X[0]))))  # pylint: disable=no-member
    n = 2**int(np.ceil(np.log2(n)))  # n must be a power of 2.  # pylint: disable=no-member
    # N must be such that n is the power of 2 closest to log_2(len(X[0]));
    # the hierarchical circuit requires n to have this form.
    N = 2**n - 1  # len(X[0]) - 1
    w = 2 * n - 1  # number of circuit parameters (weights).
    # Zero-pad the data: the number of qubits needed to encode the data
    # (log_2(N)) must be a power of 2.
    X = np.c_[X, np.zeros((len(X), 2**n - len(X[0])))]  # pylint: disable=no-member
    return n, N, w, X
def tree_prune(G, leaves_at_depth_d, d):
    """Prune the lowest-cost leaves at depth d using the global PRUNE_RATE."""
    cost_at_leaf = []
    for leaf in leaves_at_depth_d[d - 1]:
        cost_at_leaf.append(tree_cost_of_path(G, leaf))
    # Sort the leaves by path cost, ascending.
    leaves_sorted = [
        x for _, x in sorted(zip(cost_at_leaf, leaves_at_depth_d[d - 1]))
    ]
    n_pruned = int(np.ceil(PRUNE_RATE * len(cost_at_leaf)))
    leaves_kept = leaves_sorted[n_pruned:]
    leaves_removed = leaves_sorted[:n_pruned]
    G.remove_nodes_from(leaves_removed)
    leaves_at_depth_d[d - 1] = leaves_kept
def config(X):
    # Number of qubits needed to store the encoded data.
    n = 2**int(np.ceil(np.log2(len(X[0]))))  # len(X[0])  # pylint: disable=no-member
    N = n  # total number of qubits in the circuit.
    w = 2 * n - 1  # number of circuit parameters (weights).
    # Zero-pad the data up to n columns.
    X = np.c_[X, np.zeros((len(X), n - len(X[0])))]  # pylint: disable=no-member
    return n, N, w, X
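# Quick check of this config variant: with 5 features per sample, n is rounded
# up to the next power of two and X is zero-padded to match.
X = np.ones((3, 5))
n, N, w, X_padded = config(X)
print(n, N, w, X_padded.shape)  # 8 8 15 (3, 8)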
def __init__(self, feature_dim, n_layers, n_qubits, n_features_per_qubit=1,
             n_latent_qubits=0, wires=None):
    self.__feature_dim = self.__type_checker(feature_dim, int, 'feature_dim')
    self.__n_layers = self.__type_checker(n_layers, int, 'n_layers')
    self.__n_qubits = self.__type_checker(n_qubits, int, 'n_qubits')
    self.__n_features_per_qubit = self.__type_checker(
        n_features_per_qubit, int, 'n_features_per_qubit')
    self.__n_latent_qubits = self.__type_checker(
        n_latent_qubits, int, 'n_latent_qubits')

    if wires is None:
        self.__wires = list(range(self.n_total_qubits))
    elif isinstance(wires, int):
        self.__wires = list(range(wires))
    elif isinstance(wires, list):
        self.__wires = wires
    elif isinstance(wires, range):
        self.__wires = list(wires)
    else:
        raise ValueError('Wires argument needs to be None, a list, a range object or an integer')
    if len(self.__wires) != self.n_total_qubits:
        raise ValueError('Number of wires must match the number of total qubits!')

    # Number of sub-layers needed so that every feature is encoded at least once.
    self.__n_sub_layers = int(
        np.ceil(self.feature_dim / (self.n_qubits * self.n_features_per_qubit)))
    if self.n_sub_layers > 1:
        self.__n_repeats_per_layer = 1
        self.__padding_width = (self.n_features_per_qubit * self.n_qubits
                                * self.n_sub_layers - self.feature_dim)
    else:
        self.__n_repeats_per_layer = int(
            np.floor(self.n_features_per_qubit * self.n_qubits / self.feature_dim))
        self.__padding_width = (self.n_features_per_qubit * self.n_qubits) % self.feature_dim
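# Worked example of the sub-layer and padding arithmetic above, with assumed
# values: for feature_dim=10, n_qubits=4, n_features_per_qubit=1 we get
# n_sub_layers = ceil(10 / (4 * 1)) = 3 > 1, hence n_repeats_per_layer = 1 and
# padding_width = 1 * 4 * 3 - 10 = 2, i.e. two zeros are appended so the
# features fill the three sub-layers exactly.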
def shadow_bound(error, observables, failure_rate=0.01):
    """
    Calculate the shadow bound for the Pauli measurement scheme.

    Implements Eq. (S13) from https://arxiv.org/pdf/2002.08953.pdf

    Args:
        error (float): The error on the estimator.
        observables (list): List of matrices corresponding to the observables we intend to measure.
        failure_rate (float): Rate of failure for the bound to hold.

    Returns:
        An integer that gives the number of samples required to satisfy the shadow bound,
        and the chunk size required to attain the specified failure rate.
    """
    M = len(observables)
    K = 2 * np.log(2 * M / failure_rate)
    shadow_norm = (
        lambda op: np.linalg.norm(
            op - np.trace(op) / 2 ** int(np.log2(op.shape[0])), ord=np.inf
        )
        ** 2
    )
    N = 34 * max(shadow_norm(o) for o in observables) / error ** 2
    return int(np.ceil(N * K)), int(K)
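# A minimal usage sketch for shadow_bound: the number of classical-shadow
# samples (and the chunk parameter K) needed to estimate two two-qubit Pauli
# observables to within error 0.1.
X = np.array([[0, 1], [1, 0]])
Z = np.diag([1, -1])
I = np.eye(2)
observables = [np.kron(X, I), np.kron(I, Z)]
n_samples, chunk_size = shadow_bound(error=0.1, observables=observables)
print(n_samples, chunk_size)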
import pennylane as qml


def Words_Entangler_circuit(params_, wires):
    """Apply many controlled rotations between words.

    'params_' is a matrix of angles of shape qbits_per_word x ceil(num_words / 2),
    indexed as params_[bit][i].
    """
    # mask marks which entries of params_ were actually applied.
    mask = np.zeros_like(params_)
    # EVEN number of words
    if num_words % 2 == 0:
        for bit in range(qbits_per_word):
            for i in range(int(np.ceil(num_words / 2))):
                if bit % 2 != 0:
                    i_ = i * qbits_per_word * 2 + qbits_per_word
                    if i_ + bit + qbits_per_word > len(wires) - 1:
                        # Wrap the last controlled rotation back to the first word.
                        qml.CRY(params_[bit][i], wires=[wires[i_ + bit], wires[bit]])
                        mask[bit][i] = 1
                    else:
                        qml.CRY(params_[bit][i], wires=[
                            wires[i_ + bit], wires[i_ + bit + qbits_per_word]
                        ])
                        mask[bit][i] = 1
                else:
                    i_ = i * qbits_per_word * 2
                    qml.CRY(params_[bit][i], wires=[
                        wires[i_ + bit], wires[i_ + bit + qbits_per_word]
                    ])
                    mask[bit][i] = 1
    # ODD number of words
    elif num_words % 2 == 1:
        for bit in range(qbits_per_word):
            if bit % 2 == 0:  # even bits: need to loop back the last CRY
                for i in range(int(np.ceil(num_words / 2))):
                    i_ = i * qbits_per_word * 2
                    if i_ + bit + qbits_per_word > len(wires) - 1:
                        if bit == qbits_per_word - 1:  # last layer
                            qml.CRY(params_[bit][i], wires=[wires[i_ + bit], wires[bit]])
                            mask[bit][i] = 1
                        else:
                            qml.CRY(params_[bit][i], wires=[wires[i_ + bit], wires[bit + 1]])
                            mask[bit][i] = 1
                    else:
                        qml.CRY(params_[bit][i], wires=[
                            wires[i_ + bit], wires[i_ + bit + qbits_per_word]
                        ])
                        mask[bit][i] = 1
            else:  # odd bits
                for i in range(int(np.floor(num_words / 2))):
                    i_ = i * qbits_per_word * 2 + qbits_per_word
                    qml.CRY(params_[bit][i], wires=[
                        wires[i_ + bit], wires[i_ + bit + qbits_per_word]
                    ])
                    mask[bit][i] = 1
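# Hypothetical driver for Words_Entangler_circuit, assuming num_words and
# qbits_per_word are module-level globals as in the original code.
num_words, qbits_per_word = 4, 2
n_wires = num_words * qbits_per_word
dev = qml.device("default.qubit", wires=n_wires)

@qml.qnode(dev)
def entangler_demo(params_):
    Words_Entangler_circuit(params_, wires=list(range(n_wires)))
    return qml.state()

params_ = np.zeros((qbits_per_word, int(np.ceil(num_words / 2))))
state = entangler_demo(params_)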
def step(self, objective_fn, *args, **kwargs):
    """Update trainable arguments with one step of the optimizer.

    Args:
        objective_fn (function): the objective function for optimization
        *args: variable length argument list for the objective function
        **kwargs: variable length of keyword arguments for the objective function

    Returns:
        list[array]: The new variable values :math:`x^{(t+1)}`.
        If a single arg is provided, list[array] is replaced by array.
    """
    self.trainable_args = set()
    for index, arg in enumerate(args):
        if getattr(arg, "requires_grad", True):
            self.trainable_args |= {index}

    if self.s is None:
        # Number of shots per parameter
        self.s = [
            np.zeros_like(a, dtype=np.int64) + self.min_shots
            for i, a in enumerate(args)
            if i in self.trainable_args
        ]

    # Keep track of the number of shots run.
    s = np.concatenate([i.flatten() for i in self.s])
    self.max_shots = max(s)
    self.shots_used = int(2 * np.sum(s))
    self.total_shots_used += self.shots_used

    # Compute the gradient, as well as the variance in the gradient,
    # using the number of shots determined by the array s.
    grads, grad_variances = self.compute_grad(objective_fn, args, kwargs)
    new_args = self.apply_grad(grads, args)

    if self.xi is None:
        self.chi = [np.zeros_like(g, dtype=np.float64) for g in grads]
        self.xi = [np.zeros_like(g, dtype=np.float64) for g in grads]

    # Running average of the gradient.
    self.chi = [self.mu * c + (1 - self.mu) * g for c, g in zip(self.chi, grads)]

    # Running average of the gradient variance.
    self.xi = [self.mu * x + (1 - self.mu) * v for x, v in zip(self.xi, grad_variances)]

    for idx, (c, x) in enumerate(zip(self.chi, self.xi)):
        # Bias-corrected moving averages.
        xi = x / (1 - self.mu ** (self.k + 1))
        chi = c / (1 - self.mu ** (self.k + 1))

        # Determine the new optimum shot distribution for the next
        # iteration of the optimizer.
        s = np.ceil(
            (2 * self.lipschitz * self.stepsize * xi)
            / ((2 - self.lipschitz * self.stepsize) * (chi ** 2 + self.b * (self.mu ** self.k)))
        )

        # Apply an upper and lower bound on the new shot distributions,
        # to avoid the number of shots reducing below min(2, min_shots),
        # or growing too significantly.
        gamma = (
            (self.stepsize - self.lipschitz * self.stepsize ** 2 / 2) * chi ** 2
            - xi * self.lipschitz * self.stepsize ** 2 / (2 * s)
        ) / s
        argmax_gamma = np.unravel_index(np.argmax(gamma), gamma.shape)
        smax = max(s[argmax_gamma], 2)
        self.s[idx] = np.squeeze(np.int64(np.clip(s, min(2, self.min_shots), smax)))

    self.k += 1

    # Unwrap from list if one argument, for a cleaner return.
    if len(new_args) == 1:
        return new_args[0]
    return new_args
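# This step method mirrors a shot-adaptive (Rosalin-style) optimizer such as
# PennyLane's ShotAdaptiveOptimizer. Assuming that context, a typical driving
# loop looks like this sketch; the cost must be a QNode on a finite-shot
# device so that the gradient-variance estimates are meaningful.
import pennylane as qml
from pennylane import numpy as pnp

dev = qml.device("default.qubit", wires=1, shots=100)

@qml.qnode(dev)
def cost(params):
    qml.RX(params[0], wires=0)
    qml.RY(params[1], wires=0)
    return qml.expval(qml.PauliZ(0))

opt = qml.ShotAdaptiveOptimizer(min_shots=10)
params = pnp.array([0.1, 0.2], requires_grad=True)
for _ in range(5):
    params = opt.step(cost, params)  # shot budget per parameter adapts each step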