def loss_and_accuracy(
    circuit,
    masked_circuit: MaskedCircuit,
    rotations: List,
    wires_to_measure: Tuple[int, ...],
    interpret: Tuple[int, ...],
    data: np.ndarray,
    target: np.ndarray,
    variant: str = "test",
):
    """Evaluate the masked circuit on a labeled dataset and return accuracy and average cost."""
    correct = 0
    count = len(data)
    costs = []
    for current_data, current_target in zip(data, target):
        output = circuit(
            masked_circuit.differentiable_parameters,
            current_data,
            rotations,
            masked_circuit,
            masked_circuit.wires,
            wires_to_measure,
        )
        costs.append(
            cost_basis(
                circuit,
                masked_circuit.differentiable_parameters,
                current_data,
                current_target,
                rotations,
                masked_circuit,
                masked_circuit.wires,
                wires_to_measure,
                interpret,
            )
        )
        # Restrict the output distribution to the entries used for class readout.
        selected_output = output[interpret,]
        same = np.argmax(current_target) == np.argmax(selected_output)
        if same:
            correct += 1
        if __debug__:
            print(
                f"Label: {current_target} Output: {selected_output} "
                f"({output}) Correct: {same}"
            )
    accuracy = correct / count
    loss = np.average(costs)
    if __debug__:
        print(
            f"[{variant}] Accuracy = {correct} / {count} = {accuracy}\n",
            f"[{variant}] Avg Cost: {loss}",
        )
    return {
        "accuracy": accuracy,
        "loss": loss,
    }
def ohe_accuracy(labels, predictions):
    """Fraction of samples whose one-hot-encoded prediction matches the label.

    Args:
        labels: 2-d array of one-hot-encoded target labels
        predictions: 2-d array of model outputs, one row per sample

    Returns:
        float: classification accuracy in [0, 1]
    """
    correct = 0
    for l, p in zip(labels, predictions):
        correct += np.argmax(l) == np.argmax(p)
    return correct / labels.shape[0]
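# A minimal usage sketch of ohe_accuracy (made-up values): the predictions
# agree with the one-hot labels on rows 0 and 1 but not on row 2, so the
# accuracy is 2/3.
_labels = np.array([[1, 0], [0, 1], [0, 1]])
_predictions = np.array([[0.9, 0.1], [0.2, 0.8], [0.6, 0.4]])
print(ohe_accuracy(_labels, _predictions))  # ~0.667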
def test(params, x, y, state_labels=None):
    """
    Tests on a given set of data.

    Args:
        params (array[float]): array of parameters
        x (array[float]): 2-d array of input vectors
        y (array[float]): 1-d array of targets
        state_labels (array[float]): 1-d array of state representations for labels

    Returns:
        predicted (array[int]): predicted labels for test data
        output_states (array[float]): output quantum states from the circuit
    """
    fidelity_values = []
    dm_labels = [density_matrix(s) for s in state_labels]
    predicted = []

    for i in range(len(x)):
        # Fidelity of the circuit output with each label state
        # (a direct comprehension avoids a lambda that shadows `y`).
        fidelities = [qcircuit(params, x=x[i], y=dm) for dm in dm_labels]
        best_fidel = np.argmax(fidelities)
        predicted.append(best_fidel)
        fidelity_values.append(fidelities)

    return np.array(predicted), np.array(fidelity_values)
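# `test` assumes a density_matrix helper; a minimal sketch for pure states
# (the surrounding code may define its own version):
def density_matrix(state):
    """Density matrix |s><s| of a pure state vector `state`."""
    state = np.asarray(state, dtype=complex)
    return np.outer(state, state.conj())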
def classify(q_circuits, all_params, feature_vecs, labels):
    predicted_labels = []
    for i, feature_vec in enumerate(feature_vecs):
        # One-vs-all: score the input with each per-class classifier,
        # then pick the class with the highest score.
        scores = [0.0] * num_classes
        for c in range(num_classes):
            score = variational_classifier(
                q_circuits[c], (all_params[0][c], all_params[1][c]), feature_vec
            )
            scores[c] = float(score)
        pred_class = np.argmax(scores)
        predicted_labels.append(pred_class)
    return predicted_labels
def get_action(self, state):
    self.check_if_state_exist(state)
    # Epsilon-greedy with an exponentially decaying exploration rate:
    # epsilon halves every `epsilon_halflife` visits to this state.
    epsilon = self.epsilon_min + (self.epsilon_max - self.epsilon_min) * pow(
        0.5, self.visits[self.statevector2int(state)] / self.epsilon_halflife
    )
    if self.training and np.random.rand() < epsilon:
        target_action = np.random.choice(self.actions)
    else:
        qvalues = self.learner(self.params, state)[: len(self.actions)]
        # Shuffle the indices so that ties between equal Q-values are
        # broken randomly rather than always favoring the lowest index.
        idx_list = list(range(len(qvalues)))
        random.shuffle(idx_list)
        reordered = qvalues[idx_list]
        target_action = idx_list[np.argmax(reordered)]
    return target_action
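# Standalone illustration of the decay schedule above (hypothetical
# hyperparameters, not taken from the class): epsilon drops from epsilon_max
# toward epsilon_min, halving every `epsilon_halflife` visits.
_eps_min, _eps_max, _halflife = 0.01, 1.0, 20
for _visits in (0, 20, 40, 100):
    _eps = _eps_min + (_eps_max - _eps_min) * 0.5 ** (_visits / _halflife)
    print(f"visits={_visits:3d}  epsilon={_eps:.3f}")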
def number_of_solutions(indices):
    """Implement the formula given in the problem statement to find the number
    of solutions from the output of your circuit

    Args:
        - indices (list(int)): A list of bits representing the elements that map to 1.

    Returns:
        - (float): number of elements as estimated by the quantum counting algorithm
    """

    # QHACK #

    probs = circuit(indices)
    # The most likely outcome of the phase-estimation register encodes the
    # angle theta, from which the solution count follows.
    dec = np.argmax(probs)
    theta = dec * np.pi / 8
    return 16 * np.sin(theta / 2) ** 2
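# Worked example of the counting formula (hypothetical peak outcome): if the
# phase register peaks at the decimal value 3, then theta = 3*pi/8 and
# M = 16 * sin^2(theta / 2) ~= 4.9, i.e. roughly 5 of the 16 elements are
# solutions.
_theta = 3 * np.pi / 8
print(16 * np.sin(_theta / 2) ** 2)  # ~4.94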
def predicted_labels(states, state_labels=None):
    """
    Computes the label of each predicted state by selecting the one
    with maximum fidelity.

    Args:
        states (array[float]): array of output states from the circuit
        state_labels (array[float]): 1-d array of state representations for labels

    Returns:
        array[int]: index of the closest label state for each input state
    """
    output_labels = [
        np.argmax([fidelity(s, label) for label in state_labels]) for s in states
    ]
    return np.array(output_labels)
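# `predicted_labels` assumes a `fidelity` helper; a minimal pure-state sketch
# (the project may instead compare density matrices):
def fidelity(state, label_state):
    """Fidelity |<label|state>|^2 between two pure state vectors."""
    return np.abs(np.vdot(label_state, state)) ** 2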
def test(weights, x, y):
    fidelity_values = []
    predicted = []
    # The incoming `y` is unused; every input is scored against both labels.
    labels = [0, 1]
    for i in range(len(x)):
        fidelities = [
            circuit(weights, x=x[i], y=label, bias=Q_bias).item() for label in labels
        ]
        best_fidel = np.argmax(fidelities)
        predicted.append(best_fidel)
        fidelity_values.append(fidelities)
    return np.array(predicted), np.array(fidelity_values)
def qaoa_maxcut(n_layers=1):
    print("\np={:d}".format(n_layers))

    # initialize the parameters near zero; one gamma and one beta per layer
    init_params = 0.01 * np.random.rand(2, n_layers)

    # minimize the negative of the objective function
    def objective(params):
        gammas = params[0]
        betas = params[1]
        neg_obj = 0
        for edge in graph:
            # objective for the MaxCut problem
            neg_obj -= 0.5 * (1 - circuit(gammas, betas, edge=edge, n_layers=n_layers))
        return neg_obj

    # initialize optimizer: Adagrad works well empirically
    opt = qml.AdagradOptimizer(stepsize=0.5)

    # optimize parameters in objective
    params = init_params
    steps = 30
    for i in range(steps):
        params = opt.step(objective, params)
        if (i + 1) % 5 == 0:
            print("Objective after step {:5d}: {: .7f}".format(i + 1, -objective(params)))

    # sample measured bitstrings 100 times
    bit_strings = []
    n_samples = 100
    for i in range(0, n_samples):
        bit_strings.append(int(circuit(params[0], params[1], edge=None, n_layers=n_layers)))

    # print optimal parameters and most frequently sampled bitstring
    counts = np.bincount(np.array(bit_strings))
    most_freq_bit_string = np.argmax(counts)
    print("Optimized (gamma, beta) vectors:\n{}".format(params[:, :n_layers]))
    print("Most frequently sampled bit string is: {:04b}".format(most_freq_bit_string))

    return -objective(params), bit_strings
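# Hypothetical driver, assuming `graph`, `circuit`, and `qml` are defined as
# in the surrounding QAOA example: run QAOA with p = 1 and p = 2 layers and
# keep the sampled bitstrings for comparison.
bitstrings1 = qaoa_maxcut(n_layers=1)[1]
bitstrings2 = qaoa_maxcut(n_layers=2)[1]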
def predict(self, features):
    """Predicts certain observations.

    Args:
        features (array): observations to be predicted

    Returns:
        preds: float or int prediction of the model
    """
    model_output = np.array(
        [self.neural_network(self.var, features=x_) for x_ in features]
    )
    if self.type_problem == "classification":
        # Binary classification: threshold the raw output at zero.
        return np.where(model_output > 0.0, 1, 0)
    elif self.type_problem == "multiclassification":
        # Softmax over the outputs, then pick the most probable class.
        soft_outputs = np.exp(model_output) / np.sum(np.exp(model_output), axis=1)[:, None]
        return np.argmax(soft_outputs, axis=1)
    # Regression: return the raw model output.
    return model_output
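# Standalone check of the softmax/argmax branch above (made-up outputs).
# For large outputs, subtract the row-wise max before exponentiating to
# avoid overflow; the result is mathematically identical.
_model_output = np.array([[2.0, 1.0, 0.1], [0.5, 2.5, 0.2]])
_soft = np.exp(_model_output) / np.sum(np.exp(_model_output), axis=1)[:, None]
print(np.argmax(_soft, axis=1))  # [0 1]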
def get_counts(params):
    gammas = [params[0], params[2], params[4], params[6]]
    betas = [params[1], params[3], params[5], params[7]]

    # The results (bit strings) of running the circuit num_reps times
    bit_strings = []
    for i in range(0, num_reps):
        # Each sample is the computational-basis measurement in decimal,
        # i.e. an integer from 0-15 for 4 nodes.
        hold = int(circuit(gammas, betas, edge=None, num_layers=num_layers))
        bit_strings.append(hold)

    # Frequency of each bitstring outcome (indexed by its decimal value)
    counts = np.bincount(np.array(bit_strings))
    # The most frequent bitstring, as a decimal value
    most_freq_bit_string = np.argmax(counts)

    return counts, bit_strings, most_freq_bit_string
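# Rendering the decimal outcome as a 4-node cut string (hypothetical `params`;
# assumes the circuit setup above):
_counts, _bit_strings, _most_freq = get_counts(params)
print("Most frequent bit string:", format(_most_freq, "04b"))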
sample_size = 15

for i in range(5):
    current_sample = i
    # Pixel coordinates of bright pixels (>= 0.95), rescaled to [0, 1]
    x_0, x_1 = np.asarray(np.where(test_X1[current_sample] >= 0.95)) / 28
    x = np.asarray([x_0, x_1])
    certainty = 0.0
    iterations = 0
    while certainty < 0.11 and iterations < 5:
        print("Iteration:", iterations)
        test_indices = np.random.randint(low=0, high=len(x.T), size=sample_size)
        samples = x.T[test_indices]

        plt.scatter(x_0, x_1)
        plt.scatter(samples[:, 0], samples[:, 1])
        plt.ylim([0, 1])
        plt.xlim([0, 1])
        plt.show()

        zero_or_not = svm_trained_kernel.predict(samples)  # zero or not
        one_or_not = svm_trained_kernel_1.predict(samples)  # one or not
        certainty = np.absolute(
            ((np.sum(zero_or_not) + 15) / 30) - ((np.sum(one_or_not) + 15) / 30)
        )
        iterations += 1
        print(
            "Certainty:",
            (np.sum(zero_or_not) + 15) / 30,
            (np.sum(one_or_not) + 15) / 30,
            certainty,
        )
    print("Classification:", np.argmax([np.sum(zero_or_not), np.sum(one_or_not)]))
def step(self, objective_fn, *args, **kwargs):
    """Update trainable arguments with one step of the optimizer.

    Args:
        objective_fn (function): the objective function for optimization
        *args: variable length argument list for objective function
        **kwargs: variable length of keyword arguments for the objective function

    Returns:
        list[array]: The new variable values :math:`x^{(t+1)}`.
        If single arg is provided, list[array] is replaced by array.
    """
    self.trainable_args = set()

    for index, arg in enumerate(args):
        if getattr(arg, "requires_grad", True):
            self.trainable_args |= {index}

    if self.s is None:
        # Number of shots per parameter
        self.s = [
            np.zeros_like(a, dtype=np.int64) + self.min_shots
            for i, a in enumerate(args)
            if i in self.trainable_args
        ]

    # keep track of the number of shots run
    s = np.concatenate([i.flatten() for i in self.s])
    self.max_shots = max(s)
    self.shots_used = int(2 * np.sum(s))
    self.total_shots_used += self.shots_used

    # compute the gradient, as well as the variance in the gradient,
    # using the number of shots determined by the array s.
    grads, grad_variances = self.compute_grad(objective_fn, args, kwargs)
    new_args = self.apply_grad(grads, args)

    if self.xi is None:
        self.chi = [np.zeros_like(g, dtype=np.float64) for g in grads]
        self.xi = [np.zeros_like(g, dtype=np.float64) for g in grads]

    # running average of the gradient
    self.chi = [self.mu * c + (1 - self.mu) * g for c, g in zip(self.chi, grads)]

    # running average of the gradient variance
    self.xi = [self.mu * x + (1 - self.mu) * v for x, v in zip(self.xi, grad_variances)]

    for idx, (c, x) in enumerate(zip(self.chi, self.xi)):
        xi = x / (1 - self.mu ** (self.k + 1))
        chi = c / (1 - self.mu ** (self.k + 1))

        # determine the new optimum shots distribution for the next
        # iteration of the optimizer
        s = np.ceil(
            (2 * self.lipschitz * self.stepsize * xi)
            / ((2 - self.lipschitz * self.stepsize) * (chi ** 2 + self.b * (self.mu ** self.k)))
        )

        # apply an upper and lower bound on the new shot distributions,
        # to avoid the number of shots reducing below min(2, min_shots),
        # or growing too significantly.
        gamma = (
            (self.stepsize - self.lipschitz * self.stepsize ** 2 / 2) * chi ** 2
            - xi * self.lipschitz * self.stepsize ** 2 / (2 * s)
        ) / s

        argmax_gamma = np.unravel_index(np.argmax(gamma), gamma.shape)
        smax = max(s[argmax_gamma], 2)
        self.s[idx] = np.squeeze(np.int64(np.clip(s, min(2, self.min_shots), smax)))

    self.k += 1

    # unwrap from list if one argument, cleaner return
    if len(new_args) == 1:
        return new_args[0]

    return new_args
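# Scalar illustration of the shot-count rule above (all numbers hypothetical):
# with a Lipschitz constant L, step size a, and bias-corrected running mean
# chi and variance xi of the gradient, the suggested per-parameter shot count
# is s = ceil( 2*L*a*xi / ((2 - L*a) * (chi^2 + b*mu^k)) ) -- noisy gradients
# with small magnitude demand more shots.
_L, _a, _b, _mu, _k = 1.0, 0.1, 1e-6, 0.99, 10
_chi, _xi = 0.05, 0.5
_s = np.ceil((2 * _L * _a * _xi) / ((2 - _L * _a) * (_chi ** 2 + _b * _mu ** _k)))
print(int(_s))  # ~22 shots for this parameter on the next step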
print(q_network)

# start the game
obs = env.reset()
episode_reward = 0
s = time.time()
for global_step in range(TOTAL_TIMESTEPS):
    # put action logic here
    epsilon = linear_schedule(START_EPSILON, END_EPSILON,
                              EXPLORATION_FRACTION * TOTAL_TIMESTEPS, global_step)
    if random.random() < epsilon:
        action = env.action_space.sample()
    else:
        logits = qnode(q_network, obs)
        # clamp the greedy choice to the environment's two valid actions
        action = min(1, np.argmax(logits))

    # execute the game and log data.
    next_obs, reward, done, _ = env.step(action)
    episode_reward += reward

    # training
    rb.put((obs, action, reward, next_obs, done))
    if global_step > LEARNING_STARTS and global_step % TRAIN_FREQ == 0:
        s_obs, s_actions, s_rewards, s_next_obses, s_dones = rb.sample(BATCH_SIZE)
        logits = np.array(
            [qnode(target_network, obs).tolist() for obs in s_next_obses]
        )
        # the DQN target uses the maximum predicted Q-value of each next state
        target_max = np.max(logits, axis=1)
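        # A sketch of the remaining Bellman update (hypothetical names: GAMMA
        # and the loss/optimizer over q_network's parameters are not shown in
        # this excerpt):
        #   td_target = s_rewards + GAMMA * target_max * (1 - s_dones)
        # The parameters are then adjusted to reduce
        #   (Q(s_obs, s_actions) - td_target)^2.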