def optimizer(self, optimizer: Optional[Optimizer] = None) -> None:
    """Set the optimizer used to train the generator.

    Args:
        optimizer: optimizer to use with the generator. If ``None``, a
            default ADAM(AMSGRAD) optimizer configured for a single
            iteration per training step is installed.

    Raises:
        AquaError: if ``optimizer`` is given but is not an ``Optimizer``.
    """
    if optimizer:
        if isinstance(optimizer, Optimizer):
            self._optimizer = optimizer
        else:
            # Bug fix: the adjacent literals previously concatenated to
            # "...to useas the generator optimizer." (missing space).
            raise AquaError('Please provide an Optimizer object to use '
                            'as the generator optimizer.')
    else:
        # maxiter=1 so that generator and discriminator are updated in an
        # alternating fashion during qGAN training.
        self._optimizer = ADAM(maxiter=1, tol=1e-6, lr=1e-3, beta_1=0.7,
                               beta_2=0.99, noise_factor=1e-6,
                               eps=1e-6, amsgrad=True,
                               snapshot_dir=self._snapshot_dir)
def train1(self, train_params, batch_size, verb=True):
    """Run one training episode of the Q-learning agent.

    Args:
        train_params: tuple ``(alpha, gamma, epsilon)`` — learning rate,
            discount factor and exploration rate.
        batch_size: number of replay-memory transitions per update.
        verb: if True, print episode number, return and mean loss.

    Returns:
        Total (undiscounted) reward accumulated over the episode.
    """
    alpha, gamma, epsilon = train_params
    self.episode += 1
    state = self.environment.reset()
    total_reward = 0
    loss_list = []
    # sample from policy
    done = False
    while not done:
        a = self.sample_action(state, [epsilon])
        new_state, r, done, = self.environment.step(a)
        total_reward += r
        # Replay memory is kept at fixed size: drop oldest, append newest.
        self.D.pop(0)
        self.D.append((state, a, r, new_state, done))
        mB_ind = np.random.choice(range(self.memory_size), size=batch_size,
                                  replace=True)
        mB = np.array(self.D)[mB_ind]
        t = []
        for j in range(batch_size):
            # Bug fix: the previous form
            #   y_j = r * done or r + gamma * max(Q_target(s'))
            # bootstrapped on terminal transitions whenever the terminal
            # reward was 0 (falsy). Make the Bellman target explicit.
            if mB[j][-1]:
                y_j = mB[j][2]
            else:
                y_j = mB[j][2] + gamma * max(
                    self.get_qvalues([mB[j][3]], self.target_theta)[0])
            t.append([y_j, mB[j][0], mB[j][1]])
        t = np.array(t)
        adam = ADAM(maxiter=10, amsgrad=True, lr=alpha)
        if self.debug:
            start = datetime.now()
        self.theta, batch_losses, _ = adam.optimize(
            len(self.theta), lambda x: self.loss(x, t),
            initial_point=self.theta)
        # gradient_function=lambda x: self.gradient(x, t)
        loss_list.append(batch_losses)
        if self.debug:
            print(datetime.now() - start)
        # Periodically sync the target network with the online network.
        if self.train_counter % self.configuration.target_replacement == 0:
            # deepcopy alone suffices; the extra .copy() was redundant.
            self.target_theta = copy.deepcopy(self.theta)
        self.train_counter = self.train_counter + 1
        # update state
        state = new_state
    self.loss_mem.append(np.mean(np.array(loss_list).flatten()))
    if verb:
        print('train it:', self.episode, ' Return: ', total_reward,
              ' Loss: ', self.loss_mem[-1], ' epsilon ', epsilon)
    return total_reward
def __init__(self, n_features: int = 1, n_out: int = 1) -> None:
    """Initialize the NumPy-based discriminator.

    Args:
        n_features: Dimension of input data vector.
        n_out: Dimension of the discriminator's output vector.
    """
    super().__init__()
    self._n_features = n_features
    self._n_out = n_out
    self._ret = {}
    # Underlying feed-forward network sized by the stored dimensions.
    self._discriminator = DiscriminatorNet(self._n_features, self._n_out)
    # Single-iteration AMSGRAD step per train() call.
    self._optimizer = ADAM(maxiter=1,
                           lr=1e-3,
                           tol=1e-6,
                           beta_1=0.7,
                           beta_2=0.99,
                           noise_factor=1e-4,
                           eps=1e-6,
                           amsgrad=True)
def make_opt(opt_str):
    """Build a classical optimizer from its short name.

    Args:
        opt_str: one of ``"spsa"``, ``"cobyla"`` or ``"adam"``.

    Returns:
        A configured optimizer instance.

    Exits the process with status 1 on an unknown name.
    """
    factories = {
        "spsa": lambda: SPSA(max_trials=100, save_steps=1, c0=4.0,
                             skip_calibration=True),
        "cobyla": lambda: COBYLA(maxiter=1000, disp=False, rhobeg=1.0,
                                 tol=None),
        "adam": lambda: ADAM(maxiter=10000, tol=1e-6, lr=1e-3, beta_1=0.9,
                             beta_2=0.99, noise_factor=1e-8, eps=1e-10),
    }
    factory = factories.get(opt_str)
    if factory is None:
        print('error in building OPTIMIZER: {} IT DOES NOT EXIST'.format(opt_str))
        sys.exit(1)
    return factory()
def minimize(self, cost_function, initial_params=None):
    """
    Minimizes given cost function using optimizers from Qiskit Aqua.

    Args:
        cost_function(): python method which takes numpy.ndarray as input
        initial_params(np.ndarray): initial parameters to be used for optimization

    Returns:
        optimization_results(scipy.optimize.OptimizeResults): results of the optimization.

    Raises:
        ValueError: if ``self.method`` is not one of "SPSA", "ADAM", "AMSGRAD".
    """
    history = []
    if self.method == "SPSA":
        optimizer = SPSA(**self.options)
    elif self.method in ("ADAM", "AMSGRAD"):
        if self.method == "AMSGRAD":
            self.options["amsgrad"] = True
        optimizer = ADAM(**self.options)
    else:
        # Bug fix: previously an unsupported method fell through and the
        # later use of `optimizer` raised an opaque NameError.
        raise ValueError(
            "Unsupported optimization method: {}".format(self.method))
    number_of_variables = len(initial_params)
    # Optionally record every evaluated cost value for post-hoc analysis.
    if self.keep_value_history:
        cost_function_wrapper = recorder(cost_function)
    else:
        cost_function_wrapper = _CostFunctionWrapper(cost_function)
    gradient_function = None
    # Use an analytic gradient when the cost function exposes one.
    if hasattr(cost_function, "gradient") and callable(
            getattr(cost_function, "gradient")):
        gradient_function = cost_function.gradient
    solution, value, nit = optimizer.optimize(
        num_vars=number_of_variables,
        objective_function=cost_function_wrapper,
        initial_point=initial_params,
        gradient_function=gradient_function,
    )
    if self.keep_value_history:
        nfev = len(cost_function_wrapper.history)
        history = cost_function_wrapper.history
    else:
        nfev = cost_function_wrapper.number_of_calls
        history = []
    return optimization_result(
        opt_value=value,
        opt_params=solution,
        nit=nit,
        history=history,
        nfev=nfev,
    )
def train1(self, train_params, batch_size):
    """Run one Q-learning training episode.

    Args:
        train_params: tuple ``(alpha, gamma, epsilon)`` — learning rate,
            discount factor and exploration rate.
        batch_size: number of replay-memory transitions per update.

    Returns:
        Total (undiscounted) reward accumulated over the episode.
    """
    alpha, gamma, epsilon = train_params
    s = self.environment.reset()
    done = False
    total_reward = 0
    while not done:
        a = self.sample_action(s, [epsilon])
        s1, r, done, = self.environment.step(a)
        total_reward += r
        # Fixed-size replay memory: drop oldest transition, append newest.
        self.D.pop(0)
        self.D.append((s, a, r, s1, done))
        mB_ind = np.random.choice(range(self.memory_size), size=batch_size,
                                  replace=False)
        mB = np.array(self.D)[mB_ind]
        t = []
        for j in range(batch_size):
            if mB[j][-1]:
                y_j = mB[j][2]
            else:
                y_j = mB[j][2] + gamma * max(
                    self.get_qvalues([mB[j][3]], self.theta)[0])
            # Rescale the Bellman target into [0, 1].
            y_j /= 2
            y_j += 0.5
            t.append([y_j, mB[j][0], mB[j][1]])
        t = np.array(t)
        adam = ADAM(maxiter=10, lr=alpha)
        if self.debug:
            start = datetime.now()
        # Bug fix: the optimized parameters were previously discarded
        # (`theta, _, _ = ...` with `theta` unused), so no learning took
        # effect. Store them, matching the sibling train1 implementation.
        self.theta, _, _ = adam.optimize(3 * self.nb_qbits,
                                         lambda x: self.loss(x, t),
                                         initial_point=self.theta)
        if self.debug:
            print(datetime.now() - start)
        # Bug fix: advance to the new state; previously `s` was never
        # updated, so every action was sampled from the initial state.
        s = s1
    return total_reward
class NumpyDiscriminator(DiscriminativeNetwork):
    """Classical discriminator network implemented with plain NumPy.

    Pluggable discriminator for the qGAN algorithm. Wraps a
    ``DiscriminatorNet`` and performs a single ADAM(AMSGRAD) step per
    :meth:`train` call so that generator and discriminator are updated
    in an alternating fashion.
    """

    CONFIGURATION = {
        'name': 'NumpyDiscriminator',
        'description': 'qGAN Discriminator Network',
        'input_schema': {
            '$schema': 'http://json-schema.org/schema#',
            'id': 'discriminator_schema',
            'type': 'object',
            'properties': {
                'n_features': {
                    'type': 'integer',
                    'default': 1
                },
                'n_out': {
                    'type': 'integer',
                    'default': 1
                }
            },
            'additionalProperties': False
        }
    }

    def __init__(self, n_features=1, n_out=1):
        """
        Initialize the discriminator.

        Args:
            n_features: int, Dimension of input data vector.
            n_out: int, Dimension of the discriminator's output vector.
        """
        self._n_features = n_features
        self._n_out = n_out
        self._discriminator = DiscriminatorNet(self._n_features, self._n_out)
        # maxiter=1: one optimizer iteration per training step.
        self._optimizer = ADAM(maxiter=1, tol=1e-6, lr=1e-5, beta_1=0.7,
                               beta_2=0.99, noise_factor=1e-4,
                               eps=1e-6, amsgrad=True)
        self._ret = {}

    @classmethod
    def get_section_key_name(cls):
        """Return the pluggable section key for discriminative networks."""
        return Pluggable.SECTION_KEY_DISCRIMINATIVE_NETWORK

    @staticmethod
    def check_pluggable_valid():
        return

    def set_seed(self, seed):
        """
        Set the NumPy global random seed.

        Args:
            seed: int, seed
        """
        # Bug fix: np.random.RandomState(seed) constructed a new generator
        # and immediately discarded it, so the seed had no effect at all.
        # Seed the global RNG actually used by this module instead.
        np.random.seed(seed)

    def save_model(self, snapshot_dir):
        """
        Save discriminator model

        Args:
            snapshot_dir: str, directory path for saving the model
        """
        # save self._discriminator.params_values
        np.save(
            os.path.join(snapshot_dir, 'np_discriminator_architecture.csv'),
            self._discriminator.architecture)
        np.save(os.path.join(snapshot_dir, 'np_discriminator_memory.csv'),
                self._discriminator.memory)
        np.save(os.path.join(snapshot_dir, 'np_discriminator_params.csv'),
                self._discriminator.parameters)
        self._optimizer.save_params(snapshot_dir)

    def load_model(self, load_dir):
        """
        Load discriminator model

        Args:
            load_dir: str, directory with the stored discriminator model
                to be loaded
        """
        self._discriminator.architecture = np.load(
            os.path.join(load_dir, 'np_discriminator_architecture.csv'))
        self._discriminator.memory = np.load(
            os.path.join(load_dir, 'np_discriminator_memory.csv'))
        self._discriminator.parameters = np.load(
            os.path.join(load_dir, 'np_discriminator_params.csv'))
        self._optimizer.load_params(load_dir)

    def get_discriminator(self):
        """
        Get discriminator

        Returns:
            discriminator object
        """
        return self._discriminator

    def get_label(self, x, detach=False):
        """
        Get data sample labels, i.e. true or fake.

        Args:
            x: numpy array, Discriminator input, i.e. data sample.
            detach: depreciated for numpy network

        Returns:
            numpy array, Discriminator output, i.e. data label
        """
        return self._discriminator.forward(x)

    def loss(self, x, y, weights=None):
        """
        Binary cross-entropy loss (clipped at 1e-4 for numerical stability).

        Args:
            x: array, sample label (equivalent to discriminator output)
            y: array, target label
            weights: array, customized scaling for each sample (optional)

        Returns:
            float, loss function
        """
        if weights is not None:
            # Use weights as scaling factors for the samples and compute the sum
            return (-1) * np.dot(
                np.multiply(
                    y, np.log(np.maximum(np.ones(np.shape(x)) * 1e-4, x))) +
                np.multiply(
                    np.ones(np.shape(y)) - y,
                    np.log(
                        np.maximum(
                            np.ones(np.shape(x)) * 1e-4,
                            np.ones(np.shape(x)) - x))), weights)
        else:
            # Compute the mean
            return (-1) * np.mean(
                np.multiply(
                    y, np.log(np.maximum(np.ones(np.shape(x)) * 1e-4, x))) +
                np.multiply(
                    np.ones(np.shape(y)) - y,
                    np.log(
                        np.maximum(
                            np.ones(np.shape(x)) * 1e-4,
                            np.ones(np.shape(x)) - x))))

    def _get_objective_function(self, data, weights):
        """
        Get the objective function

        Args:
            data: training and generated data
            weights: weights corresponding to training resp. generated data

        Returns:
            objective function for the optimization
        """
        real_batch = data[0]
        real_prob = weights[0]
        generated_batch = data[1]
        generated_prob = weights[1]

        def objective_function(params):
            # Average of the loss on real and on generated samples.
            self._discriminator.parameters = params
            # Train on Real Data
            prediction_real = self.get_label(real_batch)
            loss_real = self.loss(prediction_real,
                                  np.ones(np.shape(prediction_real)),
                                  real_prob)
            prediction_fake = self.get_label(generated_batch)
            loss_fake = self.loss(prediction_fake,
                                  np.zeros(np.shape(prediction_fake)),
                                  generated_prob)
            return 0.5 * (loss_real[0] + loss_fake[0])

        return objective_function

    def _get_gradient_function(self, data, weights):
        """
        Get the gradient function

        Args:
            data: training and generated data
            weights: weights corresponding to training resp. generated data

        Returns:
            Gradient function for the optimization
        """
        real_batch = data[0]
        real_prob = weights[0]
        generated_batch = data[1]
        generated_prob = weights[1]

        def gradient_function(params):
            # Sum of the backward-pass gradients on real and generated data.
            self._discriminator.parameters = params
            prediction_real = self.get_label(real_batch)
            grad_real = self._discriminator.backward(
                prediction_real, np.ones(np.shape(prediction_real)),
                real_prob)
            prediction_generated = self.get_label(generated_batch)
            grad_generated = self._discriminator.backward(
                prediction_generated,
                np.zeros(np.shape(prediction_generated)), generated_prob)
            return np.add(grad_real, grad_generated)

        return gradient_function

    def train(self, data, weights, penalty=False, quantum_instance=None,
              shots=None):
        """
        Perform one training step w.r.t to the discriminator's parameters

        Args:
            data: [real_batch, generated_batch]
                real_batch: array, Training data batch.
                generated_batch: array, Generated data batch.
            weights: [real_prob, generated_prob]
            penalty: Boolean, Depreciated for classical networks.
            quantum_instance: QuantumInstance, Depreciated for classical
                networks.
            shots: int, Number of shots for hardware or qasm execution.
                Depreciated for classical networks.

        Returns:
            dict, with Discriminator loss and updated parameters.
        """
        # Train on Generated Data
        self._shots = shots
        # Force single optimization iteration
        self._optimizer._maxiter = 1
        self._optimizer._t = 0
        objective = self._get_objective_function(data, weights)
        gradient = self._get_gradient_function(data, weights)
        self._discriminator.parameters, loss, _ = \
            self._optimizer.optimize(num_vars=len(self._discriminator.parameters),
                                     objective_function=objective,
                                     initial_point=np.array(self._discriminator.parameters),
                                     gradient_function=gradient)
        self._ret['loss'] = loss
        self._ret['params'] = self._discriminator.parameters
        return self._ret
class NumPyDiscriminator(DiscriminativeNetwork):
    """
    Discriminator based on NumPy

    Classical discriminator network for the qGAN algorithm. It wraps a
    ``DiscriminatorNet`` and performs one ADAM(AMSGRAD) iteration per
    :meth:`train` call so that generator and discriminator updates alternate.
    """

    def __init__(self, n_features: int = 1, n_out: int = 1) -> None:
        """
        Args:
            n_features: Dimension of input data vector.
            n_out: Dimension of the discriminator's output vector.
        """
        super().__init__()
        self._n_features = n_features
        self._n_out = n_out
        self._discriminator = DiscriminatorNet(self._n_features, self._n_out)
        # maxiter=1: a single optimizer iteration per training step,
        # keeping generator/discriminator updates alternating.
        self._optimizer = ADAM(maxiter=1, tol=1e-6, lr=1e-3, beta_1=0.7, beta_2=0.99,
                               noise_factor=1e-4,
                               eps=1e-6, amsgrad=True)
        self._ret = {}

    def set_seed(self, seed):
        """
        Set seed.

        Args:
            seed (int): seed
        """
        aqua_globals.random_seed = seed

    def save_model(self, snapshot_dir):
        """
        Save discriminator model

        Args:
            snapshot_dir (str): directory path for saving the model
        """
        # save self._discriminator.params_values
        np.save(os.path.join(snapshot_dir, 'np_discriminator_architecture.csv'),
                self._discriminator.architecture)
        np.save(os.path.join(snapshot_dir, 'np_discriminator_memory.csv'),
                self._discriminator.memory)
        np.save(os.path.join(snapshot_dir, 'np_discriminator_params.csv'),
                self._discriminator.parameters)
        self._optimizer.save_params(snapshot_dir)

    def load_model(self, load_dir):
        """
        Load discriminator model

        Args:
            load_dir (str): file with stored pytorch discriminator model to be loaded
        """
        self._discriminator.architecture = \
            np.load(os.path.join(load_dir, 'np_discriminator_architecture.csv'))
        self._discriminator.memory = np.load(os.path.join(load_dir,
                                                          'np_discriminator_memory.csv'))
        self._discriminator.parameters = np.load(os.path.join(load_dir,
                                                              'np_discriminator_params.csv'))
        self._optimizer.load_params(load_dir)

    @property
    def discriminator_net(self):
        """
        Get discriminator

        Returns:
            DiscriminatorNet: discriminator object
        """
        return self._discriminator

    @discriminator_net.setter
    def discriminator_net(self, net):
        # Replace the underlying network, e.g. with a pre-trained one.
        self._discriminator = net

    def get_label(self, x, detach=False):  # pylint: disable=arguments-differ,unused-argument
        """
        Get data sample labels, i.e. true or fake.

        Args:
            x (numpy.ndarray): Discriminator input, i.e. data sample.
            detach (bool): depreciated for numpy network

        Returns:
            numpy.ndarray: Discriminator output, i.e. data label
        """
        return self._discriminator.forward(x)

    def loss(self, x, y, weights=None):
        """
        Loss function

        Binary cross-entropy; predictions are clipped from below at 1e-4
        before taking the log for numerical stability.

        Args:
            x (numpy.ndarray): sample label (equivalent to discriminator output)
            y (numpy.ndarray): target label
            weights(numpy.ndarray): customized scaling for each sample (optional)

        Returns:
            float: loss function
        """
        if weights is not None:
            # Use weights as scaling factors for the samples and compute the sum
            return (-1) * np.dot(np.multiply(y,
                                             np.log(np.maximum(np.ones(np.shape(x)) * 1e-4, x))) +
                                 np.multiply(np.ones(np.shape(y)) - y,
                                             np.log(np.maximum(np.ones(np.shape(x)) * 1e-4,
                                                               np.ones(np.shape(x)) - x))),
                                 weights)
        else:
            # Compute the mean
            return (-1) * np.mean(np.multiply(y,
                                              np.log(np.maximum(np.ones(np.shape(x)) * 1e-4, x))) +
                                  np.multiply(np.ones(np.shape(y)) - y,
                                              np.log(np.maximum(np.ones(np.shape(x)) * 1e-4,
                                                                np.ones(np.shape(x)) - x))))

    def _get_objective_function(self, data, weights):
        """
        Get the objective function

        Args:
            data (tuple): training and generated data
            weights (numpy.ndarray): weights corresponding to training resp. generated data

        Returns:
            objective_function: objective function for the optimization
        """
        real_batch = data[0]
        real_prob = weights[0]
        generated_batch = data[1]
        generated_prob = weights[1]

        def objective_function(params):
            # Average of the discriminator loss on real and generated samples.
            self._discriminator.parameters = params
            # Train on Real Data
            prediction_real = self.get_label(real_batch)
            loss_real = self.loss(prediction_real, np.ones(np.shape(prediction_real)), real_prob)
            prediction_fake = self.get_label(generated_batch)
            loss_fake = self.loss(prediction_fake, np.zeros(np.shape(prediction_fake)),
                                  generated_prob)
            return 0.5 * (loss_real[0] + loss_fake[0])

        return objective_function

    def _get_gradient_function(self, data, weights):
        """
        Get the gradient function

        Args:
            data (tuple): training and generated data
            weights (numpy.ndarray): weights corresponding to training resp. generated data

        Returns:
            gradient_function: Gradient function for the optimization
        """
        real_batch = data[0]
        real_prob = weights[0]
        generated_batch = data[1]
        generated_prob = weights[1]

        def gradient_function(params):
            # Sum of the backward-pass gradients on real and generated data.
            self._discriminator.parameters = params
            prediction_real = self.get_label(real_batch)
            grad_real = self._discriminator.backward(prediction_real,
                                                     np.ones(np.shape(prediction_real)),
                                                     real_prob)
            prediction_generated = self.get_label(generated_batch)
            grad_generated = self._discriminator.backward(prediction_generated,
                                                          np.zeros(
                                                              np.shape(prediction_generated)),
                                                          generated_prob)
            return np.add(grad_real, grad_generated)

        return gradient_function

    def train(self, data, weights, penalty=False, quantum_instance=None, shots=None):
        """
        Perform one training step w.r.t to the discriminator's parameters

        Args:
            data (tuple(numpy.ndarray, numpy.ndarray)):
                real_batch: array, Training data batch.
                generated_batch: array, Generated data batch.
            weights (tuple): real problem, generated problem
            penalty (bool): Depreciated for classical networks.
            quantum_instance (QuantumInstance): Depreciated for classical networks.
            shots (int): Number of shots for hardware or qasm execution.
                Ignored for classical networks.

        Returns:
            dict: with Discriminator loss and updated parameters.
        """
        # Train on Generated Data
        # Force single optimization iteration
        # NOTE(review): reaches into ADAM private members (_maxiter, _t);
        # presumably resets the internal time step each call — confirm.
        self._optimizer._maxiter = 1
        self._optimizer._t = 0
        objective = self._get_objective_function(data, weights)
        gradient = self._get_gradient_function(data, weights)
        self._discriminator.parameters, loss, _ = \
            self._optimizer.optimize(num_vars=len(self._discriminator.parameters),
                                     objective_function=objective,
                                     initial_point=np.array(self._discriminator.parameters),
                                     gradient_function=gradient)
        self._ret['loss'] = loss
        self._ret['params'] = self._discriminator.parameters
        return self._ret
class QuantumGenerator(GenerativeNetwork):
    """Quantum Generator.

    The quantum generator is a parametrized quantum circuit which can be trained with the
    :class:`~qiskit.aqua.algorithms.QGAN` algorithm
    to generate a quantum state which approximates the probability
    distribution of given training data. At the beginning of the training the parameters will
    be set randomly, thus, the output will be random.
    Throughout the training the quantum generator learns to represent the target distribution.
    Eventually, the trained generator can be used for state preparation e.g. in QAE.
    """

    def __init__(self,
                 bounds: np.ndarray,
                 num_qubits: Union[List[int], np.ndarray],
                 generator_circuit: Optional[Union[UnivariateVariationalDistribution,
                                                   MultivariateVariationalDistribution,
                                                   QuantumCircuit]] = None,
                 init_params: Optional[Union[List[float], np.ndarray]] = None,
                 optimizer: Optional[Optimizer] = None,
                 gradient_function: Optional[Union[Callable, Gradient]] = None,
                 snapshot_dir: Optional[str] = None) -> None:
        """
        Args:
            bounds: k min/max data values [[min_1,max_1],...,[min_k,max_k]],
                given input data dim k
            num_qubits: k numbers of qubits to determine representation resolution,
                i.e. n qubits enable the representation of 2**n values
                [n_1,..., n_k]
            generator_circuit: a UnivariateVariationalDistribution for univariate data,
                a MultivariateVariationalDistribution for multivariate data,
                or a QuantumCircuit implementing the generator.
            init_params: 1D numpy array or list, Initialization for
                the generator's parameters.
            optimizer: optimizer to be used for the training of the generator
            gradient_function: A Gradient object, or a function returning partial
                derivatives of the loss function w.r.t. the generator variational
                params.
            snapshot_dir: str or None, if not None save the optimizer's parameter after every
                update step to the given directory

        Raises:
            AquaError: Set multivariate variational distribution to represent multivariate data
        """
        super().__init__()
        self._bounds = bounds
        self._num_qubits = num_qubits
        self.generator_circuit = generator_circuit
        if generator_circuit is None:
            # Default generator: uniform superposition followed by a shallow
            # TwoLocal variational form.
            circuit = QuantumCircuit(sum(num_qubits))
            circuit.h(circuit.qubits)
            var_form = TwoLocal(sum(num_qubits), 'ry', 'cz', reps=1, entanglement='circular')
            circuit.compose(var_form, inplace=True)

            # Set generator circuit
            self.generator_circuit = circuit

        if isinstance(generator_circuit, (UnivariateVariationalDistribution,
                                          MultivariateVariationalDistribution)):
            warnings.warn('Passing a UnivariateVariationalDistribution or MultivariateVariational'
                          'Distribution as ``generator_circuit`` is deprecated as of Aqua 0.8.0 '
                          'and the support will be removed no earlier than 3 months after the '
                          'release date. You should pass a QuantumCircuit instead.',
                          DeprecationWarning, stacklevel=2)
            self._free_parameters = generator_circuit._var_form_params
            self.generator_circuit = generator_circuit._var_form
        else:
            # Sort by name to get a deterministic parameter ordering.
            self._free_parameters = sorted(self.generator_circuit.parameters,
                                           key=lambda p: p.name)

        if init_params is None:
            # Small random initialization in [0, 2e-2).
            init_params = aqua_globals.random.random(
                self.generator_circuit.num_parameters) * 2e-2
        self._bound_parameters = init_params

        # Set optimizer for updating the generator network
        self._snapshot_dir = snapshot_dir
        self.optimizer = optimizer

        self._gradient_function = gradient_function

        if np.ndim(self._bounds) == 1:
            bounds = np.reshape(self._bounds, (1, len(self._bounds)))
        else:
            bounds = self._bounds
        # Build the data grid mapping measurement outcomes to data values;
        # for multivariate data the grid elements are the cartesian product
        # of the per-dimension grids.
        for j, prec in enumerate(self._num_qubits):
            # prepare data grid for dim j
            grid = np.linspace(bounds[j, 0], bounds[j, 1], (2**prec))
            if j == 0:
                if len(self._num_qubits) > 1:
                    self._data_grid = [grid]
                else:
                    self._data_grid = grid  # type: ignore
                self._grid_elements = grid
            elif j == 1:
                self._data_grid.append(grid)
                temp = []
                for g_e in self._grid_elements:
                    for g in grid:
                        temp0 = [g_e]
                        temp0.append(g)
                        temp.append(temp0)
                self._grid_elements = temp  # type: ignore
            else:
                self._data_grid.append(grid)
                temp = []
                for g_e in self._grid_elements:
                    for g in grid:
                        temp0 = deepcopy(g_e)
                        temp0.append(g)
                        temp.append(temp0)
                self._grid_elements = deepcopy(temp)  # type: ignore
        self._data_grid = np.array(self._data_grid)  # type: ignore

        self._seed = 7
        self._shots = None
        self._discriminator = None
        self._ret = {}  # type: Dict[str, Any]

    @property
    def parameter_values(self) -> Union[List, np.ndarray]:
        """
        Get parameter values from the quantum generator

        Returns:
            Current parameter values
        """
        return self._bound_parameters

    @parameter_values.setter
    def parameter_values(self, p_values: Union[List, np.ndarray]) -> None:
        """
        Set parameter values for the quantum generator

        Args:
            p_values: Parameter values
        """
        self._bound_parameters = p_values

    @property
    def seed(self) -> int:
        """ Get seed. """
        return self._seed

    @seed.setter
    def seed(self, seed: int) -> None:
        """
        Set seed.

        Args:
            seed (int): seed to use.
        """
        self._seed = seed
        aqua_globals.random_seed = seed

    def set_seed(self, seed: int) -> None:
        """
        Set seed. Deprecated; use the ``seed`` property instead.

        Args:
            seed (int): seed
        """
        warnings.warn('Using set_seed() is deprecated as of Aqua 0.9.0 '
                      'and support for it will be removed no earlier than '
                      'three months after the release date. Please use '
                      'the QuantumGenerator.seed property instead.',
                      DeprecationWarning, stacklevel=2)
        aqua_globals.random_seed = seed

    @property
    def discriminator(self) -> DiscriminativeNetwork:
        """ Get discriminator. """
        return self._discriminator

    @discriminator.setter
    def discriminator(self, discriminator: DiscriminativeNetwork) -> None:
        """
        Set discriminator.

        Args:
            discriminator (DiscriminativeNetwork): Discriminator used to
                compute the loss function.
        """
        self._discriminator = discriminator

    def set_discriminator(self, discriminator: DiscriminativeNetwork) -> None:
        """
        Set discriminator network. Deprecated; use the ``discriminator``
        property instead.

        Args:
            discriminator (DiscriminativeNetwork): Discriminator used to
                compute the loss function.
        """
        warnings.warn('Using set_discriminator() is deprecated as of '
                      'Aqua 0.9.0 and support for it will be removed no earlier'
                      ' than three months after the release date. Please use '
                      'the QuantumGenerator.discriminator property instead.',
                      DeprecationWarning, stacklevel=2)
        self._discriminator = discriminator

    @property
    def optimizer(self) -> Optimizer:
        """ Get optimizer. """
        return self._optimizer

    @optimizer.setter
    def optimizer(self, optimizer: Optional[Optimizer] = None) -> None:
        """
        Set optimizer. If None, a default single-iteration ADAM(AMSGRAD)
        optimizer is installed.

        Args:
            optimizer (Optimizer): optimizer to use with the generator.

        Raises:
            AquaError: invalid input.
        """
        if optimizer:
            if isinstance(optimizer, Optimizer):
                self._optimizer = optimizer
            else:
                raise AquaError('Please provide an Optimizer object to use'
                                'as the generator optimizer.')
        else:
            # maxiter=1 so generator and discriminator updates alternate.
            self._optimizer = ADAM(maxiter=1, tol=1e-6, lr=1e-3, beta_1=0.7, beta_2=0.99,
                                   noise_factor=1e-6,
                                   eps=1e-6, amsgrad=True, snapshot_dir=self._snapshot_dir)

    def construct_circuit(self, params=None):
        """
        Construct generator circuit.

        Args:
            params (list | dict): parameters which should be used to run the generator.

        Returns:
            Instruction: construct the quantum circuit and return as gate
        """
        if params is None:
            return self.generator_circuit

        # Map a flat parameter list onto the sorted free parameters.
        if isinstance(params, (list, np.ndarray)):
            params = dict(zip(self._free_parameters, params))

        return self.generator_circuit.assign_parameters(params)

        # self.generator_circuit.build(qc=qc, q=q)
        # else:
        #     generator_circuit_copy = deepcopy(self.generator_circuit)
        #     generator_circuit_copy.params = params
        #     generator_circuit_copy.build(qc=qc, q=q)
        #
        # return qc.copy(name='qc')
        # return qc.to_instruction()

    def get_output(self, quantum_instance, params=None, shots=None):
        """
        Get classical data samples from the generator.
        Running the quantum generator circuit results in a quantum state.
        To train this generator with a classical discriminator, we need to sample classical outputs
        by measuring the quantum state and mapping them to feature space defined by the training
        data.

        Args:
            quantum_instance (QuantumInstance): Quantum Instance, used to run the generator
                circuit.
            params (numpy.ndarray): array or None, parameters which should
                be used to run the generator, if None use self._params
            shots (int): if not None use a number of shots that is different from the
                number set in quantum_instance

        Returns:
            list: generated samples, array: sample occurrence in percentage
        """
        instance_shots = quantum_instance.run_config.shots
        q = QuantumRegister(sum(self._num_qubits), name='q')
        qc = QuantumCircuit(q)
        if params is None:
            params = self._bound_parameters
        qc.append(self.construct_circuit(params), q)
        if quantum_instance.is_statevector:
            # No measurement needed; probabilities come from the statevector.
            pass
        else:
            c = ClassicalRegister(sum(self._num_qubits), name='c')
            qc.add_register(c)
            qc.measure(q, c)

        if shots is not None:
            quantum_instance.set_config(shots=shots)

        result = quantum_instance.execute(qc)

        generated_samples = []
        if quantum_instance.is_statevector:
            result = result.get_statevector(qc)
            # |amplitude|^2 gives the outcome probabilities.
            values = np.multiply(result, np.conj(result))
            values = list(values.real)
            keys = []
            for j in range(len(values)):
                keys.append(np.binary_repr(j, int(sum(self._num_qubits))))
        else:
            result = result.get_counts(qc)
            keys = list(result)
            values = list(result.values())
            # Normalize counts to relative frequencies.
            values = [float(v) / np.sum(values) for v in values]
        generated_samples_weights = values
        # Decode each measured bitstring into a point on the data grid,
        # dimension by dimension.
        for i, _ in enumerate(keys):
            index = 0
            temp = []
            for k, p in enumerate(self._num_qubits):
                bin_rep = 0
                j = 0
                while j < p:
                    bin_rep += int(keys[i][index]) * 2**(int(p) - j - 1)
                    j += 1
                    index += 1
                if len(self._num_qubits) > 1:
                    temp.append(self._data_grid[k][int(bin_rep)])
                else:
                    temp.append(self._data_grid[int(bin_rep)])
            generated_samples.append(temp)

        # self.generator_circuit._probabilities = generated_samples_weights
        if shots is not None:
            # Restore the initial quantum_instance configuration
            quantum_instance.set_config(shots=instance_shots)

        return generated_samples, generated_samples_weights

    def loss(self, x, weights):  # pylint: disable=arguments-differ
        """
        Loss function for training the generator's parameters.

        Args:
            x (numpy.ndarray): sample label (equivalent to discriminator output)
            weights (numpy.ndarray): probability for measuring the sample

        Returns:
            float: loss function
        """
        try:
            # pylint: disable=no-member
            loss = (-1) * np.dot(np.log(x).transpose(), weights)
        except Exception:  # pylint: disable=broad-except
            # Fallback when the transpose/dot shapes do not line up.
            loss = (-1) * np.dot(np.log(x), weights)
        return loss.flatten()

    def _get_objective_function(self, quantum_instance, discriminator):
        """
        Get objective function

        Args:
            quantum_instance (QuantumInstance): used to run the quantum circuit.
            discriminator (torch.nn.Module): discriminator network to compute the sample labels.

        Returns:
            objective_function: objective function for quantum generator optimization
        """

        def objective_function(params):
            """
            Objective function

            Args:
                params (numpy.ndarray): generator parameters

            Returns:
                self.loss: loss function
            """
            generated_data, generated_prob = self.get_output(quantum_instance, params=params,
                                                             shots=self._shots)
            prediction_generated = discriminator.get_label(generated_data, detach=True)
            return self.loss(prediction_generated, generated_prob)

        return objective_function

    def _convert_to_gradient_function(self, gradient_object, quantum_instance, discriminator):
        """
        Convert to gradient function

        Args:
            gradient_object (Gradient): the gradient object to be used to
                compute analytical gradients.
            quantum_instance (QuantumInstance): used to run the quantum circuit.
            discriminator (torch.nn.Module): discriminator network to compute the sample labels.

        Returns:
            gradient_function: gradient function that takes the current
            parameter values and returns partial derivatives of the loss
            function w.r.t. the variational parameters.
        """

        def gradient_function(current_point):
            """
            Gradient function

            Args:
                current_point (np.ndarray): Current values for the variational parameters.

            Returns:
                np.ndarray: array of partial derivatives of the loss
                function w.r.t. the variational parameters.
            """
            free_params = self._free_parameters
            generated_data, _ = self.get_output(quantum_instance, params=current_point,
                                                shots=self._shots)
            prediction_generated = discriminator.get_label(generated_data, detach=True)
            op = ~CircuitStateFn(primitive=self.generator_circuit)
            grad_object = gradient_object.convert(operator=op, params=free_params)
            value_dict = {free_params[i]: current_point[i] for i in range(len(free_params))}
            analytical_gradients = np.array(grad_object.assign_parameters(value_dict).eval())
            loss_gradients = self.loss(prediction_generated, analytical_gradients).real
            return loss_gradients

        return gradient_function

    def train(self, quantum_instance=None, shots=None):
        """
        Perform one training step w.r.t to the generator's parameters

        Args:
            quantum_instance (QuantumInstance): used to run the generator circuit.
            shots (int): Number of shots for hardware or qasm execution.

        Returns:
            dict: generator loss(float) and updated parameters (array).
        """
        self._shots = shots

        # TODO Improve access to maxiter, say via options getter, to avoid private member access
        # and since not all optimizers have that exact naming figure something better as well to
        # allow the checking below to not have to warn if it has something else and max iterations
        # is truly 1 anyway.
        try:
            if self._optimizer._maxiter != 1:
                warnings.warn('Please set the the optimizer maxiter argument to 1 '
                              'to ensure that the generator '
                              'and discriminator are updated in an alternating fashion.')
        except AttributeError:
            maxiter = self._optimizer._options.get('maxiter')
            if maxiter is not None and maxiter != 1:
                warnings.warn('Please set the the optimizer maxiter argument to 1 '
                              'to ensure that the generator '
                              'and discriminator are updated in an alternating fashion.')
            elif maxiter is None:
                warnings.warn('Please ensure the optimizer max iterations are set to 1 '
                              'to ensure that the generator '
                              'and discriminator are updated in an alternating fashion.')

        # Lazily convert a Gradient object into a callable on first use.
        if isinstance(self._gradient_function, Gradient):
            self._gradient_function = self._convert_to_gradient_function(
                self._gradient_function, quantum_instance, self._discriminator)

        objective = self._get_objective_function(quantum_instance, self._discriminator)
        self._bound_parameters, loss, _ = self._optimizer.optimize(
            num_vars=len(self._bound_parameters),
            objective_function=objective,
            initial_point=self._bound_parameters,
            gradient_function=self._gradient_function)

        self._ret['loss'] = loss
        self._ret['params'] = self._bound_parameters

        return self._ret
    def __init__(self,
                 bounds: np.ndarray,
                 num_qubits: List[int],
                 generator_circuit: Optional[Union[UnivariateVariationalDistribution,
                                                   MultivariateVariationalDistribution,
                                                   QuantumCircuit]] = None,
                 init_params: Optional[Union[List[float], np.ndarray]] = None,
                 snapshot_dir: Optional[str] = None) -> None:
        """
        Args:
            bounds: k min/max data values [[min_1,max_1],...,[min_k,max_k]],
                given input data dim k
            num_qubits: k numbers of qubits to determine representation resolution,
                i.e. n qubits enable the representation of 2**n values [n_1,..., n_k]
            generator_circuit: a UnivariateVariationalDistribution for univariate data,
                a MultivariateVariationalDistribution for multivariate data,
                or a QuantumCircuit implementing the generator.
            init_params: 1D numpy array or list, Initialization for the generator's parameters.
            snapshot_dir: str or None, if not None save the optimizer's parameter after every
                update step to the given directory

        Raises:
            AquaError: Set multivariate variational distribution to represent multivariate data
        """
        super().__init__()
        self._bounds = bounds
        self._num_qubits = num_qubits
        self.generator_circuit = generator_circuit
        if self.generator_circuit is None:
            # Default circuit: circular CZ entanglement over all qubits
            # (pairwise [0, 1] when only two qubits total).
            entangler_map = []
            if np.sum(num_qubits) > 2:
                for i in range(int(np.sum(num_qubits))):
                    entangler_map.append([i, int(np.mod(i + 1, np.sum(num_qubits)))])
            else:
                if np.sum(num_qubits) > 1:
                    entangler_map.append([0, 1])

            if len(num_qubits) > 1:
                # Multivariate data: start from a uniform distribution over all dimensions.
                num_qubits = list(map(int, num_qubits))
                low = bounds[:, 0].tolist()
                high = bounds[:, 1].tolist()
                init_dist = MultivariateUniformDistribution(num_qubits, low=low, high=high)
                q = QuantumRegister(sum(num_qubits))
                qc = QuantumCircuit(q)
                init_dist.build(qc, q)
                init_distribution = Custom(num_qubits=sum(num_qubits), circuit=qc)
                # Set variational form
                var_form = RY(sum(num_qubits), depth=1, initial_state=init_distribution,
                              entangler_map=entangler_map, entanglement_gate='cz')
                if init_params is None:
                    # Small random initial angles in [0, 0.02).
                    init_params = aqua_globals.random.rand(var_form.num_parameters) * 2 * 1e-2
                # Set generator circuit
                self.generator_circuit = MultivariateVariationalDistribution(
                    num_qubits, var_form, init_params, low=low, high=high)
            else:
                # Univariate data: single-dimension uniform initial distribution.
                init_dist = UniformDistribution(sum(num_qubits), low=bounds[0], high=bounds[1])
                q = QuantumRegister(sum(num_qubits), name='q')
                qc = QuantumCircuit(q)
                init_dist.build(qc, q)
                init_distribution = Custom(num_qubits=sum(num_qubits), circuit=qc)
                var_form = RY(sum(num_qubits), depth=1, initial_state=init_distribution,
                              entangler_map=entangler_map, entanglement_gate='cz')
                if init_params is None:
                    init_params = aqua_globals.random.rand(var_form.num_parameters) * 2 * 1e-2
                # Set generator circuit
                self.generator_circuit = UnivariateVariationalDistribution(
                    int(np.sum(num_qubits)), var_form, init_params,
                    low=bounds[0], high=bounds[1])

        # Validate that a user-supplied circuit matches the data dimensionality.
        if len(num_qubits) > 1:
            if isinstance(self.generator_circuit, MultivariateVariationalDistribution):
                pass
            else:
                raise AquaError('Set multivariate variational distribution '
                                'to represent multivariate data')
        else:
            if isinstance(self.generator_circuit, UnivariateVariationalDistribution):
                pass
            else:
                raise AquaError('Set univariate variational distribution '
                                'to represent univariate data')

        # Set optimizer for updating the generator network
        self._optimizer = ADAM(maxiter=1, tol=1e-6, lr=1e-3, beta_1=0.7, beta_2=0.99,
                               noise_factor=1e-6, eps=1e-6, amsgrad=True,
                               snapshot_dir=snapshot_dir)

        if np.ndim(self._bounds) == 1:
            bounds = np.reshape(self._bounds, (1, len(self._bounds)))
        else:
            bounds = self._bounds
        # Build per-dimension sample grids (_data_grid) and their cartesian product
        # (_grid_elements), grown one dimension at a time.
        for j, prec in enumerate(self._num_qubits):
            # prepare data grid for dim j
            grid = np.linspace(bounds[j, 0], bounds[j, 1], (2**prec))
            if j == 0:
                if len(self._num_qubits) > 1:
                    self._data_grid = [grid]
                else:
                    self._data_grid = grid
                self._grid_elements = grid
            elif j == 1:
                # Second dimension: pair every existing scalar with each new grid value.
                self._data_grid.append(grid)
                temp = []
                for g_e in self._grid_elements:
                    for g in grid:
                        temp0 = [g_e]
                        temp0.append(g)
                        temp.append(temp0)
                self._grid_elements = temp
            else:
                # Higher dimensions: extend a copy of each existing tuple with the new value.
                self._data_grid.append(grid)
                temp = []
                for g_e in self._grid_elements:
                    for g in grid:
                        temp0 = deepcopy(g_e)
                        temp0.append(g)
                        temp.append(temp0)
                self._grid_elements = deepcopy(temp)
        self._data_grid = np.array(self._data_grid)

        self._shots = None
        self._discriminator = None
        self._ret = {}
class QuantumGenerator(GenerativeNetwork):
    """ Quantum Generator.

    The quantum generator is a parametrized quantum circuit which can be trained with the
    :class:`~qiskit.aqua.algorithms.QGAN` algorithm
    to generate a quantum state which approximates the probability
    distribution of given training data. At the beginning of the training the parameters will
    be set randomly, thus, the output will be random. Throughout the training the quantum
    generator learns to represent the target distribution.
    Eventually, the trained generator can be used for state preparation e.g. in QAE.
    """

    def __init__(self,
                 bounds: np.ndarray,
                 num_qubits: List[int],
                 generator_circuit: Optional[Union[UnivariateVariationalDistribution,
                                                   MultivariateVariationalDistribution,
                                                   QuantumCircuit]] = None,
                 init_params: Optional[Union[List[float], np.ndarray]] = None,
                 snapshot_dir: Optional[str] = None) -> None:
        """
        Args:
            bounds: k min/max data values [[min_1,max_1],...,[min_k,max_k]],
                given input data dim k
            num_qubits: k numbers of qubits to determine representation resolution,
                i.e. n qubits enable the representation of 2**n values [n_1,..., n_k]
            generator_circuit: a UnivariateVariationalDistribution for univariate data,
                a MultivariateVariationalDistribution for multivariate data,
                or a QuantumCircuit implementing the generator.
            init_params: 1D numpy array or list, Initialization for the generator's parameters.
            snapshot_dir: str or None, if not None save the optimizer's parameter after every
                update step to the given directory

        Raises:
            AquaError: Set multivariate variational distribution to represent multivariate data
        """
        super().__init__()
        self._bounds = bounds
        self._num_qubits = num_qubits
        self.generator_circuit = generator_circuit
        if self.generator_circuit is None:
            # Default entangler map: circular CZ over all qubits ([0, 1] for two qubits).
            entangler_map = []
            if np.sum(num_qubits) > 2:
                for i in range(int(np.sum(num_qubits))):
                    entangler_map.append([i, int(np.mod(i + 1, np.sum(num_qubits)))])
            else:
                if np.sum(num_qubits) > 1:
                    entangler_map.append([0, 1])

            if len(num_qubits) > 1:
                num_qubits = list(map(int, num_qubits))
                low = bounds[:, 0].tolist()
                high = bounds[:, 1].tolist()
                init_dist = MultivariateUniformDistribution(num_qubits, low=low, high=high)
                q = QuantumRegister(sum(num_qubits))
                qc = QuantumCircuit(q)
                init_dist.build(qc, q)
                init_distribution = Custom(num_qubits=sum(num_qubits), circuit=qc)
                # Set variational form
                var_form = RY(sum(num_qubits), depth=1, initial_state=init_distribution,
                              entangler_map=entangler_map, entanglement_gate='cz')
                if init_params is None:
                    # Small random initial angles in [0, 0.02).
                    init_params = aqua_globals.random.rand(var_form.num_parameters) * 2 * 1e-2
                # Set generator circuit
                self.generator_circuit = MultivariateVariationalDistribution(
                    num_qubits, var_form, init_params, low=low, high=high)
            else:
                init_dist = UniformDistribution(sum(num_qubits), low=bounds[0], high=bounds[1])
                q = QuantumRegister(sum(num_qubits), name='q')
                qc = QuantumCircuit(q)
                init_dist.build(qc, q)
                init_distribution = Custom(num_qubits=sum(num_qubits), circuit=qc)
                var_form = RY(sum(num_qubits), depth=1, initial_state=init_distribution,
                              entangler_map=entangler_map, entanglement_gate='cz')
                if init_params is None:
                    init_params = aqua_globals.random.rand(var_form.num_parameters) * 2 * 1e-2
                # Set generator circuit
                self.generator_circuit = UnivariateVariationalDistribution(
                    int(np.sum(num_qubits)), var_form, init_params,
                    low=bounds[0], high=bounds[1])

        # Validate that a user-supplied circuit matches the data dimensionality.
        if len(num_qubits) > 1:
            if isinstance(self.generator_circuit, MultivariateVariationalDistribution):
                pass
            else:
                raise AquaError('Set multivariate variational distribution '
                                'to represent multivariate data')
        else:
            if isinstance(self.generator_circuit, UnivariateVariationalDistribution):
                pass
            else:
                raise AquaError('Set univariate variational distribution '
                                'to represent univariate data')

        # Set optimizer for updating the generator network
        self._optimizer = ADAM(maxiter=1, tol=1e-6, lr=1e-3, beta_1=0.7, beta_2=0.99,
                               noise_factor=1e-6, eps=1e-6, amsgrad=True,
                               snapshot_dir=snapshot_dir)

        if np.ndim(self._bounds) == 1:
            bounds = np.reshape(self._bounds, (1, len(self._bounds)))
        else:
            bounds = self._bounds
        # Build per-dimension sample grids (_data_grid) and their cartesian product
        # (_grid_elements), extended one dimension at a time.
        for j, prec in enumerate(self._num_qubits):
            # prepare data grid for dim j
            grid = np.linspace(bounds[j, 0], bounds[j, 1], (2**prec))
            if j == 0:
                if len(self._num_qubits) > 1:
                    self._data_grid = [grid]
                else:
                    self._data_grid = grid
                self._grid_elements = grid
            elif j == 1:
                # Second dimension: pair every existing scalar with each new grid value.
                self._data_grid.append(grid)
                temp = []
                for g_e in self._grid_elements:
                    for g in grid:
                        temp0 = [g_e]
                        temp0.append(g)
                        temp.append(temp0)
                self._grid_elements = temp
            else:
                # Higher dimensions: extend a copy of each existing tuple with the new value.
                self._data_grid.append(grid)
                temp = []
                for g_e in self._grid_elements:
                    for g in grid:
                        temp0 = deepcopy(g_e)
                        temp0.append(g)
                        temp.append(temp0)
                self._grid_elements = deepcopy(temp)
        self._data_grid = np.array(self._data_grid)

        self._shots = None
        self._discriminator = None
        self._ret = {}

    def set_seed(self, seed):
        """ Set seed.

        Args:
            seed (int): seed
        """
        aqua_globals.random_seed = seed

    def set_discriminator(self, discriminator):
        """ Set discriminator network.

        Args:
            discriminator (Discriminator): Discriminator used to compute the loss function.
        """
        self._discriminator = discriminator

    def construct_circuit(self, params=None):
        """ Construct generator circuit.

        Args:
            params (numpy.ndarray): parameters which should be used to run the generator,
                if None use self._params

        Returns:
            Instruction: construct the quantum circuit and return as gate
        """
        q = QuantumRegister(sum(self._num_qubits), name='q')
        qc = QuantumCircuit(q)
        if params is None:
            self.generator_circuit.build(qc=qc, q=q)
        else:
            # Bind parameters into a deep copy so the stored circuit is not mutated.
            generator_circuit_copy = deepcopy(self.generator_circuit)
            generator_circuit_copy.params = params
            generator_circuit_copy.build(qc=qc, q=q)

        # return qc.copy(name='qc')
        return qc.to_instruction()

    def get_output(self, quantum_instance, qc_state_in=None, params=None, shots=None):
        """ Get classical data samples from the generator.

        Running the quantum generator circuit results in a quantum state.
        To train this generator with a classical discriminator, we need to sample classical
        outputs by measuring the quantum state and mapping them to feature space defined by
        the training data.

        Args:
            quantum_instance (QuantumInstance): Quantum Instance, used to run the generator
                circuit.
            qc_state_in (QuantumCircuit): deprecated
            params (numpy.ndarray): array or None, parameters which should be used to run the
                generator, if None use self._params
            shots (int): if not None use a number of shots that is different from the number
                set in quantum_instance

        Returns:
            list: generated samples, array: sample occurrence in percentage
        """
        instance_shots = quantum_instance.run_config.shots
        q = QuantumRegister(sum(self._num_qubits), name='q')
        qc = QuantumCircuit(q)
        qc.append(self.construct_circuit(params), q)
        if quantum_instance.is_statevector:
            pass
        else:
            # Shot-based backends need explicit measurement into a classical register.
            c = ClassicalRegister(sum(self._num_qubits), name='c')
            qc.add_register(c)
            qc.measure(q, c)

        if shots is not None:
            quantum_instance.set_config(shots=shots)

        result = quantum_instance.execute(qc)

        generated_samples = []
        if quantum_instance.is_statevector:
            result = result.get_statevector(qc)
            # Probabilities are |amplitude|^2; enumerate all basis states as bitstrings.
            values = np.multiply(result, np.conj(result))
            values = list(values.real)
            keys = []
            for j in range(len(values)):
                keys.append(np.binary_repr(j, int(sum(self._num_qubits))))
        else:
            result = result.get_counts(qc)
            keys = list(result)
            values = list(result.values())
            # Normalize counts to relative frequencies.
            values = [float(v) / np.sum(values) for v in values]
        generated_samples_weights = values
        for i, _ in enumerate(keys):
            index = 0
            temp = []
            # Decode each measured bitstring dimension by dimension into a grid index,
            # then map that index onto the data grid value.
            for k, p in enumerate(self._num_qubits):
                bin_rep = 0
                j = 0
                while j < p:
                    bin_rep += int(keys[i][index]) * 2**(int(p) - j - 1)
                    j += 1
                    index += 1
                if len(self._num_qubits) > 1:
                    temp.append(self._data_grid[k][int(bin_rep)])
                else:
                    temp.append(self._data_grid[int(bin_rep)])
            generated_samples.append(temp)

        self.generator_circuit._probabilities = generated_samples_weights
        if shots is not None:
            # Restore the initial quantum_instance configuration
            quantum_instance.set_config(shots=instance_shots)
        return generated_samples, generated_samples_weights

    def loss(self, x, weights):  # pylint: disable=arguments-differ
        """ Loss function for training the generator's parameters.

        Args:
            x (numpy.ndarray): sample label (equivalent to discriminator output)
            weights (numpy.ndarray): probability for measuring the sample

        Returns:
            float: loss function
        """
        try:
            # pylint: disable=no-member
            loss = (-1) * np.dot(np.log(x).transpose(), weights)
        except Exception:  # pylint: disable=broad-except
            # Fallback for label arrays whose shape does not admit a transpose dot-product.
            loss = (-1) * np.dot(np.log(x), weights)
        return loss.flatten()

    def _get_objective_function(self, quantum_instance, discriminator):
        """ Get objective function

        Args:
            quantum_instance (QuantumInstance): used to run the quantum circuit.
            discriminator (torch.nn.Module): discriminator network to compute the sample labels.

        Returns:
            objective_function: objective function for quantum generator optimization
        """
        def objective_function(params):
            """ Objective function

            Args:
                params (numpy.ndarray): generator parameters

            Returns:
                self.loss: loss function
            """
            generated_data, generated_prob = self.get_output(quantum_instance, params=params,
                                                             shots=self._shots)
            prediction_generated = discriminator.get_label(generated_data, detach=True)
            return self.loss(prediction_generated, generated_prob)

        return objective_function

    def train(self, quantum_instance=None, shots=None):
        """ Perform one training step w.r.t to the generator's parameters

        Args:
            quantum_instance (QuantumInstance): used to run the generator circuit.
            shots (int): Number of shots for hardware or qasm execution.

        Returns:
            dict: generator loss(float) and updated parameters (array).
        """
        self._shots = shots
        # Force single optimization iteration
        # NOTE(review): writes ADAM's private ``_maxiter``/``_t`` to reset its internal
        # step counter each call — brittle if the optimizer implementation changes.
        self._optimizer._maxiter = 1
        self._optimizer._t = 0
        objective = self._get_objective_function(quantum_instance, self._discriminator)
        self.generator_circuit.params, loss, _ = \
            self._optimizer.optimize(num_vars=len(self.generator_circuit.params),
                                     objective_function=objective,
                                     initial_point=self.generator_circuit.params)

        self._ret['loss'] = loss
        self._ret['params'] = self.generator_circuit.params

        return self._ret
def test_adam(self): """ adam test """ optimizer = ADAM(maxiter=10000, tol=1e-06) res = self._optimize(optimizer) self.assertLessEqual(res[2], 10000)
class QuantumGenerator(GenerativeNetwork):
    """ Generator """

    # Pluggable configuration: JSON schema describing the accepted constructor inputs.
    CONFIGURATION = {
        'name': 'QuantumGenerator',
        'description': 'qGAN Generator Network',
        'input_schema': {
            '$schema': 'http://json-schema.org/draft-07/schema#',
            'id': 'generator_schema',
            'type': 'object',
            'properties': {
                'bounds': {
                    'type': 'array'
                },
                'num_qubits': {
                    'type': 'array'
                },
                'init_params': {
                    'type': ['array', 'null'],
                    'default': None
                },
                'snapshot_dir': {
                    'type': ['string', 'null'],
                    'default': None
                }
            },
            'additionalProperties': False
        }
    }

    def __init__(self, bounds, num_qubits, generator_circuit=None, init_params=None,
                 snapshot_dir=None):
        """
        Initialize the generator network.

        Args:
            bounds (numpy.ndarray): k min/max data values [[min_1,max_1],...,[min_k,max_k]],
                given input data dim k
            num_qubits (list): k numbers of qubits to determine representation resolution,
                i.e. n qubits enable the representation of 2**n values [n_1,..., n_k]
            generator_circuit (Union): generator circuit
                UnivariateVariationalDistribution for univariate data/
                MultivariateVariationalDistribution for multivariate data,
                Quantum circuit to implement the generator.
            init_params (Union(list, numpy.ndarray)): 1D numpy array or list,
                Initialization for the generator's parameters.
            snapshot_dir (str): str or None, if not None save the optimizer's parameter after
                every update step to the given directory

        Raises:
            AquaError: Set multivariate variational distribution to represent multivariate data
        """
        super().__init__()
        self._bounds = bounds
        self._num_qubits = num_qubits
        self.generator_circuit = generator_circuit
        if self.generator_circuit is None:
            # Default entangler map: circular CZ over all qubits ([0, 1] for two qubits).
            entangler_map = []
            if np.sum(num_qubits) > 2:
                for i in range(int(np.sum(num_qubits))):
                    entangler_map.append([i, int(np.mod(i + 1, np.sum(num_qubits)))])
            else:
                if np.sum(num_qubits) > 1:
                    entangler_map.append([0, 1])

            if len(num_qubits) > 1:
                num_qubits = list(map(int, num_qubits))
                low = bounds[:, 0].tolist()
                high = bounds[:, 1].tolist()
                init_dist = MultivariateUniformDistribution(num_qubits, low=low, high=high)
                q = QuantumRegister(sum(num_qubits))
                qc = QuantumCircuit(q)
                init_dist.build(qc, q)
                init_distribution = Custom(num_qubits=sum(num_qubits), circuit=qc)
                # Set variational form
                var_form = RY(sum(num_qubits), depth=1, initial_state=init_distribution,
                              entangler_map=entangler_map, entanglement_gate='cz')
                if init_params is None:
                    # Small random initial angles in [0, 0.02).
                    init_params = aqua_globals.random.rand(var_form.num_parameters) * 2 * 1e-2
                # Set generator circuit
                self.generator_circuit = MultivariateVariationalDistribution(
                    num_qubits, var_form, init_params, low=low, high=high)
            else:
                init_dist = UniformDistribution(sum(num_qubits), low=bounds[0], high=bounds[1])
                q = QuantumRegister(sum(num_qubits), name='q')
                qc = QuantumCircuit(q)
                init_dist.build(qc, q)
                init_distribution = Custom(num_qubits=sum(num_qubits), circuit=qc)
                var_form = RY(sum(num_qubits), depth=1, initial_state=init_distribution,
                              entangler_map=entangler_map, entanglement_gate='cz')
                if init_params is None:
                    init_params = aqua_globals.random.rand(var_form.num_parameters) * 2 * 1e-2
                # Set generator circuit
                self.generator_circuit = UnivariateVariationalDistribution(
                    int(np.sum(num_qubits)), var_form, init_params,
                    low=bounds[0], high=bounds[1])

        # Validate that a user-supplied circuit matches the data dimensionality.
        if len(num_qubits) > 1:
            if isinstance(self.generator_circuit, MultivariateVariationalDistribution):
                pass
            else:
                raise AquaError('Set multivariate variational distribution '
                                'to represent multivariate data')
        else:
            if isinstance(self.generator_circuit, UnivariateVariationalDistribution):
                pass
            else:
                raise AquaError('Set univariate variational distribution '
                                'to represent univariate data')

        # Set optimizer for updating the generator network
        self._optimizer = ADAM(maxiter=1, tol=1e-6, lr=1e-3, beta_1=0.7, beta_2=0.99,
                               noise_factor=1e-6, eps=1e-6, amsgrad=True,
                               snapshot_dir=snapshot_dir)

        if np.ndim(self._bounds) == 1:
            bounds = np.reshape(self._bounds, (1, len(self._bounds)))
        else:
            bounds = self._bounds
        # Build per-dimension sample grids (_data_grid) and their cartesian product
        # (_grid_elements), extended one dimension at a time.
        for j, prec in enumerate(self._num_qubits):
            # prepare data grid for dim j
            grid = np.linspace(bounds[j, 0], bounds[j, 1], (2**prec))
            if j == 0:
                if len(self._num_qubits) > 1:
                    self._data_grid = [grid]
                else:
                    self._data_grid = grid
                self._grid_elements = grid
            elif j == 1:
                # Second dimension: pair every existing scalar with each new grid value.
                self._data_grid.append(grid)
                temp = []
                for g_e in self._grid_elements:
                    for g in grid:
                        temp0 = [g_e]
                        temp0.append(g)
                        temp.append(temp0)
                self._grid_elements = temp
            else:
                # Higher dimensions: extend a copy of each existing tuple with the new value.
                self._data_grid.append(grid)
                temp = []
                for g_e in self._grid_elements:
                    for g in grid:
                        temp0 = deepcopy(g_e)
                        temp0.append(g)
                        temp.append(temp0)
                self._grid_elements = deepcopy(temp)
        self._data_grid = np.array(self._data_grid)

        self._shots = None
        self._discriminator = None
        self._ret = {}

    @classmethod
    def init_params(cls, params):
        """
        Initialize via parameters dictionary and algorithm input instance.

        Args:
            params (dict): parameters dictionary

        Returns:
            QuantumGenerator: vqe object

        Raises:
            AquaError: invalid input
        """
        generator_params = params.get(Pluggable.SECTION_KEY_GENERATIVE_NETWORK)
        bounds = generator_params.get('bounds')
        if bounds is None:
            raise AquaError("Data value bounds are required.")
        num_qubits = generator_params.get('num_qubits')
        if num_qubits is None:
            raise AquaError("Numbers of qubits per dimension required.")
        init_params = generator_params.get('init_params')
        snapshot_dir = generator_params.get('snapshot_dir')

        return cls(bounds, num_qubits, generator_circuit=None, init_params=init_params,
                   snapshot_dir=snapshot_dir)

    @classmethod
    def get_section_key_name(cls):
        # Pluggable registry key under which this generator is configured.
        return Pluggable.SECTION_KEY_GENERATIVE_NETWORK

    def set_seed(self, seed):
        """ Set seed.

        Args:
            seed (int): seed
        """
        aqua_globals.random_seed = seed

    def set_discriminator(self, discriminator):
        """ Set discriminator

        Args:
            discriminator (Discriminator): Discriminator used to compute the loss function.
        """
        self._discriminator = discriminator

    def construct_circuit(self, params=None):
        """ Construct generator circuit.

        Args:
            params (numpy.ndarray): parameters which should be used to run the generator,
                if None use self._params

        Returns:
            Instruction: construct the quantum circuit and return as gate
        """
        q = QuantumRegister(sum(self._num_qubits), name='q')
        qc = QuantumCircuit(q)
        if params is None:
            self.generator_circuit.build(qc=qc, q=q)
        else:
            # Bind parameters into a deep copy so the stored circuit is not mutated.
            generator_circuit_copy = deepcopy(self.generator_circuit)
            generator_circuit_copy.params = params
            generator_circuit_copy.build(qc=qc, q=q)

        # return qc.copy(name='qc')
        return qc.to_instruction()

    def get_output(self, quantum_instance, qc_state_in=None, params=None, shots=None):
        """ Get data samples from the generator.

        Args:
            quantum_instance (QuantumInstance): Quantum Instance, used to run the generator
                circuit.
            qc_state_in (QuantumCircuit): deprecated
            params (numpy.ndarray): array or None, parameters which should be used to run the
                generator, if None use self._params
            shots (int): if not None use a number of shots that is different from the number
                set in quantum_instance

        Returns:
            list: generated samples, array: sample occurrence in percentage
        """
        instance_shots = quantum_instance.run_config.shots
        q = QuantumRegister(sum(self._num_qubits), name='q')
        qc = QuantumCircuit(q)
        qc.append(self.construct_circuit(params), q)
        if quantum_instance.is_statevector:
            pass
        else:
            # Shot-based backends need explicit measurement into a classical register.
            c = ClassicalRegister(sum(self._num_qubits), name='c')
            qc.add_register(c)
            qc.measure(q, c)

        if shots is not None:
            quantum_instance.set_config(shots=shots)

        result = quantum_instance.execute(qc)

        generated_samples = []
        if quantum_instance.is_statevector:
            result = result.get_statevector(qc)
            # Probabilities are |amplitude|^2; enumerate all basis states as bitstrings.
            values = np.multiply(result, np.conj(result))
            values = list(values.real)
            keys = []
            for j in range(len(values)):
                keys.append(np.binary_repr(j, int(sum(self._num_qubits))))
        else:
            result = result.get_counts(qc)
            keys = list(result)
            values = list(result.values())
            # Normalize counts to relative frequencies.
            values = [float(v) / np.sum(values) for v in values]
        generated_samples_weights = values
        for i, _ in enumerate(keys):
            index = 0
            temp = []
            # Decode each measured bitstring dimension by dimension into a grid index,
            # then map that index onto the data grid value.
            for k, p in enumerate(self._num_qubits):
                bin_rep = 0
                j = 0
                while j < p:
                    bin_rep += int(keys[i][index]) * 2**(int(p) - j - 1)
                    j += 1
                    index += 1
                if len(self._num_qubits) > 1:
                    temp.append(self._data_grid[k][int(bin_rep)])
                else:
                    temp.append(self._data_grid[int(bin_rep)])
            generated_samples.append(temp)

        self.generator_circuit._probabilities = generated_samples_weights
        if shots is not None:
            # Restore the initial quantum_instance configuration
            quantum_instance.set_config(shots=instance_shots)
        return generated_samples, generated_samples_weights

    def loss(self, x, weights):  # pylint: disable=arguments-differ
        """ Loss function

        Args:
            x (numpy.ndarray): sample label (equivalent to discriminator output)
            weights (numpy.ndarray): probability for measuring the sample

        Returns:
            float: loss function
        """
        try:
            # pylint: disable=no-member
            loss = (-1) * np.dot(np.log(x).transpose(), weights)
        except Exception:  # pylint: disable=broad-except
            # Fallback for label arrays whose shape does not admit a transpose dot-product.
            loss = (-1) * np.dot(np.log(x), weights)
        return loss.flatten()

    def _get_objective_function(self, quantum_instance, discriminator):
        """ Get objective function

        Args:
            quantum_instance (QuantumInstance): used to run the quantum circuit.
            discriminator (torch.nn.Module): discriminator network to compute the sample labels.

        Returns:
            objective_function: objective function for quantum generator optimization
        """
        def objective_function(params):
            """ Objective function

            Args:
                params (numpy.ndarray): generator parameters

            Returns:
                self.loss: loss function
            """
            generated_data, generated_prob = self.get_output(quantum_instance, params=params,
                                                             shots=self._shots)
            prediction_generated = discriminator.get_label(generated_data, detach=True)
            return self.loss(prediction_generated, generated_prob)

        return objective_function

    def train(self, quantum_instance=None, shots=None):
        """ Perform one training step w.r.t to the generator's parameters

        Args:
            quantum_instance (QuantumInstance): used to run the generator circuit.
            shots (int): Number of shots for hardware or qasm execution.

        Returns:
            dict: generator loss(float) and updated parameters (array).
        """
        self._shots = shots
        # Force single optimization iteration
        # NOTE(review): writes ADAM's private ``_maxiter``/``_t`` to reset its internal
        # step counter each call — brittle if the optimizer implementation changes.
        self._optimizer._maxiter = 1
        self._optimizer._t = 0
        objective = self._get_objective_function(quantum_instance, self._discriminator)
        self.generator_circuit.params, loss, _ = \
            self._optimizer.optimize(num_vars=len(self.generator_circuit.params),
                                     objective_function=objective,
                                     initial_point=self.generator_circuit.params)

        self._ret['loss'] = loss
        self._ret['params'] = self.generator_circuit.params

        return self._ret
# Subsample the negative class to balance the training set.
chosen_neg_label_idx = neg_label[np.random.permutation(len(neg_label))[:neg_sample]]
print("Positive sample num: %d" % len(pos_label))  # fixed typo: "Postive"
print("Negative sample num: %d" % len(neg_label))

# Construct dict to feed QSVM: class label -> feature rows for that class.
training_input = {
    0: df_train_q[chosen_pos_label_idx],
    1: df_train_q[chosen_neg_label_idx]
}
test_input = df_test_q

# data prepared
print("data prepared.")

# Build the variational quantum classifier (VQC) with a ZZ feature map and
# an RYRZ variational form, trained with ADAM.
seed = 10598
var_form = variational_forms.RYRZ(2)
feature_map = ZZFeatureMap(feature_dimension=len(mvp_col), reps=2, entanglement='linear')
qsvm = VQC(ADAM(100), feature_map, var_form, training_input)

backend = BasicAer.get_backend('qasm_simulator')
quantum_instance = QuantumInstance(backend, shots=1024, seed_simulator=seed,
                                   seed_transpiler=seed, optimization_level=3)
result = qsvm.run(quantum_instance)

# Evaluate on the training set; predict()[1] holds the predicted labels.
y_pred = qsvm.predict(df_train_q)[1]
print("Final train acc: %f\nFinal train F1:%f" % (np.mean(y_pred == y_train),
                                                  f1_score(y_pred, y_train)))

# Predict on the test set and write the submission file.
y_pred = qsvm.predict(df_test_q)[1]
record_test_result_for_kaggle(y_pred, submission_file="quantum_submission.csv")
def __init__(self, bounds: np.ndarray, num_qubits: List[int], generator_circuit: Optional[ Union[UnivariateVariationalDistribution, MultivariateVariationalDistribution, QuantumCircuit]] = None, init_params: Optional[Union[List[float], np.ndarray]] = None, optimizer: Optional[Optimizer] = None, snapshot_dir: Optional[str] = None) -> None: """ Args: bounds: k min/max data values [[min_1,max_1],...,[min_k,max_k]], given input data dim k num_qubits: k numbers of qubits to determine representation resolution, i.e. n qubits enable the representation of 2**n values [n_1,..., n_k] generator_circuit: a UnivariateVariationalDistribution for univariate data, a MultivariateVariationalDistribution for multivariate data, or a QuantumCircuit implementing the generator. init_params: 1D numpy array or list, Initialization for the generator's parameters. optimizer: optimizer to be used for the training of the generator snapshot_dir: str or None, if not None save the optimizer's parameter after every update step to the given directory Raises: AquaError: Set multivariate variational distribution to represent multivariate data """ super().__init__() self._bounds = bounds self._num_qubits = num_qubits self.generator_circuit = generator_circuit if generator_circuit is None: circuit = QuantumCircuit(sum(num_qubits)) circuit.h(circuit.qubits) var_form = TwoLocal(sum(num_qubits), 'ry', 'cz', reps=1, entanglement='circular') circuit.compose(var_form, inplace=True) # Set generator circuit self.generator_circuit = circuit if isinstance(generator_circuit, (UnivariateVariationalDistribution, MultivariateVariationalDistribution)): warnings.warn( 'Passing a UnivariateVariationalDistribution or MultivariateVariational' 'Distribution is as ``generator_circuit`` is deprecated as of Aqua 0.8.0 ' 'and the support will be removed no earlier than 3 months after the ' 'release data. 
You should pass as QuantumCircuit instead.', DeprecationWarning, stacklevel=2) self._free_parameters = generator_circuit._var_form_params self.generator_circuit = generator_circuit._var_form else: self._free_parameters = sorted(self.generator_circuit.parameters, key=lambda p: p.name) if init_params is None: init_params = aqua_globals.random.random( self.generator_circuit.num_parameters) * 2e-2 self._bound_parameters = init_params # Set optimizer for updating the generator network if optimizer: self._optimizer = optimizer else: self._optimizer = ADAM(maxiter=1, tol=1e-6, lr=1e-3, beta_1=0.7, beta_2=0.99, noise_factor=1e-6, eps=1e-6, amsgrad=True, snapshot_dir=snapshot_dir) if np.ndim(self._bounds) == 1: bounds = np.reshape(self._bounds, (1, len(self._bounds))) else: bounds = self._bounds for j, prec in enumerate(self._num_qubits): # prepare data grid for dim j grid = np.linspace(bounds[j, 0], bounds[j, 1], (2**prec)) if j == 0: if len(self._num_qubits) > 1: self._data_grid = [grid] else: self._data_grid = grid # type: ignore self._grid_elements = grid elif j == 1: self._data_grid.append(grid) temp = [] for g_e in self._grid_elements: for g in grid: temp0 = [g_e] temp0.append(g) temp.append(temp0) self._grid_elements = temp # type: ignore else: self._data_grid.append(grid) temp = [] for g_e in self._grid_elements: for g in grid: temp0 = deepcopy(g_e) temp0.append(g) temp.append(temp0) self._grid_elements = deepcopy(temp) # type: ignore self._data_grid = np.array(self._data_grid) # type: ignore self._shots = None self._discriminator = None self._ret = {} # type: Dict[str, Any]