def __grad_shift_rule(unitary, g, i, variable, hamiltonian):
    '''
    function for getting the gradients of directly differentiable gates. Expects precompiled circuits.
    :param unitary: QCircuit: the QCircuit object containing the gate to be differentiated
    :param g: a parametrized gate: the gate being differentiated
    :param i: Int: the position in unitary at which g appears
    :param variable: Variable or String: the variable with respect to which gate g is being differentiated
    :param hamiltonian: the hamiltonian with respect to which unitary is to be measured, in the case that unitary
        is contained within an ExpectationValue
    :return: an Objective, whose calculation yields the gradient of g w.r.t variable
    '''

    # possibility for override in custom gate construction
    if hasattr(g, "shifted_gates"):
        inner_grad = __grad_inner(g.parameter, variable)
        shifted = g.shifted_gates()
        dOinc = Objective()
        for w, shifted_gate in shifted:
            Ux = unitary.replace_gates(positions=[i], circuits=[shifted_gate])
            wx = w * inner_grad
            Ex = Objective.ExpectationValue(U=Ux, H=hamiltonian)
            dOinc += wx * Ex
        return dOinc
    else:
        raise TequilaException('No shift found for gate {}\nWas the compiler called?'.format(g))
def qng_grad_gaussian(unitary, g, i, hamiltonian):
    '''
    qng function for getting the gradients of gaussian gates.
    THIS variant of the function does not seek out underlying gate parameters; it treats each variable 'as is'.
    This treatment is necessary for the QNG but is incorrect elsewhere.
    :param unitary: QCircuit: the QCircuit object containing the gate to be differentiated
    :param g: a parametrized gate: the gate being differentiated
    :param i: Int: the position in unitary at which g appears
    :param hamiltonian: the hamiltonian with respect to which unitary is to be measured, in the case that unitary
        is contained within an ExpectationValue
    :return: an Objective, whose calculation yields the gradient of the expectation value w.r.t gate g, 'as is'
    '''
    if not hasattr(g, "shift"):
        raise TequilaException("No shift found for gate {}".format(g))

    # neo_a and neo_b are the shifted versions of gate g needed to evaluate its gradient
    shift_a = g._parameter + numpy.pi / (4 * g.shift)
    shift_b = g._parameter - numpy.pi / (4 * g.shift)
    neo_a = copy.deepcopy(g)
    neo_a._parameter = shift_a
    neo_b = copy.deepcopy(g)
    neo_b._parameter = shift_b

    U1 = unitary.replace_gates(positions=[i], circuits=[neo_a])
    w1 = g.shift
    U2 = unitary.replace_gates(positions=[i], circuits=[neo_b])
    w2 = -g.shift

    Oplus = ExpectationValueImpl(U=U1, H=hamiltonian)
    Ominus = ExpectationValueImpl(U=U2, H=hamiltonian)
    dOinc = w1 * Objective(args=[Oplus]) + w2 * Objective(args=[Ominus])
    return dOinc
def __grad_gaussian(unitary, g, i, variable, hamiltonian):
    '''
    function for getting the gradients of gaussian gates. Expects precompiled circuits.
    :param unitary: QCircuit: the QCircuit object containing the gate to be differentiated
    :param g: a parametrized gate: the gate being differentiated
    :param i: Int: the position in unitary at which g appears
    :param variable: Variable or String: the variable with respect to which gate g is being differentiated
    :param hamiltonian: the hamiltonian with respect to which unitary is to be measured, in the case that unitary
        is contained within an ExpectationValue
    :return: an Objective, whose calculation yields the gradient of g w.r.t variable
    '''
    if not hasattr(g, "shift"):
        raise TequilaException("No shift found for gate {}".format(g))

    # neo_a and neo_b are the shifted versions of gate g needed to evaluate its gradient
    shift_a = g._parameter + np.pi / (4 * g.shift)
    shift_b = g._parameter - np.pi / (4 * g.shift)
    neo_a = copy.deepcopy(g)
    neo_a._parameter = shift_a
    neo_b = copy.deepcopy(g)
    neo_b._parameter = shift_b

    U1 = unitary.replace_gates(positions=[i], circuits=[neo_a])
    w1 = g.shift * __grad_inner(g.parameter, variable)
    U2 = unitary.replace_gates(positions=[i], circuits=[neo_b])
    w2 = -g.shift * __grad_inner(g.parameter, variable)

    Oplus = ExpectationValueImpl(U=U1, H=hamiltonian)
    Ominus = ExpectationValueImpl(U=U2, H=hamiltonian)
    dOinc = w1 * Objective(args=[Oplus]) + w2 * Objective(args=[Ominus])
    return dOinc
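# Note on the construction above: for a gaussian gate generated as exp(-i theta G / 2)
# with generator eigenvalues +/- r (exposed here as g.shift), the parameter-shift rule reads
#
#     d<H>/d(theta) = r * ( <H>(theta + pi/(4r)) - <H>(theta - pi/(4r)) )
#
# which is exactly the combination w1 * Oplus + w2 * Ominus built above;
# __grad_inner(g.parameter, variable) supplies the chain-rule factor d(theta)/d(variable)
# when the gate angle is itself a function of the differentiation variable.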
def __grad_vector_objective(objective: typing.Union[Objective, VectorObjective], variable: Variable):
    argsets = objective.argsets
    transformations = objective._transformations
    outputs = []
    for pos in range(len(objective)):
        args = argsets[pos]
        transformation = transformations[pos]
        dO = None
        processed_expectationvalues = {}
        for i, arg in enumerate(args):
            if __AUTOGRAD__BACKEND__ == "jax":
                df = jax.grad(transformation, argnums=i)
            elif __AUTOGRAD__BACKEND__ == "autograd":
                df = jax.grad(transformation, argnum=i)
            else:
                raise TequilaException("Can't differentiate without autograd or jax")

            # We can detect one simple case where the outer derivative is const=1
            if transformation is None or transformation == identity:
                outer = 1.0
            else:
                outer = Objective(args=args, transformation=df)

            if hasattr(arg, "U"):
                # save redundancies
                if arg in processed_expectationvalues:
                    inner = processed_expectationvalues[arg]
                else:
                    inner = __grad_inner(arg=arg, variable=variable)
                    processed_expectationvalues[arg] = inner
            else:
                # this means this inner derivative is purely variable dependent
                inner = __grad_inner(arg=arg, variable=variable)

            if inner == 0.0:
                # don't pile up zero expectationvalues
                continue

            if dO is None:
                dO = outer * inner
            else:
                dO = dO + outer * inner
        if dO is None:
            dO = Objective()
        outputs.append(dO)
    if len(outputs) == 1:
        return outputs[0]
    return outputs
def grad(objective: Objective, variable: Variable = None, no_compile=False):
    '''
    wrapper function for getting the gradients of Objectives, ExpectationValues,
    Unitaries (including single gates), and Transforms.
    :param objective: the structure to be differentiated
    :param variable: Variable or String: the variable with respect to which objective should be differentiated.
        Default None: return the total gradient, with respect to all variables.
    :param no_compile: bool: skip compilation of objective before differentiation.
    :return: if variable is None, a dictionary mapping each Variable to its gradient Objective;
        otherwise, a single Objective: the gradient with respect to variable.
    '''
    if variable is None:
        # None means that all components are created
        variables = objective.extract_variables()
        result = {}

        if len(variables) == 0:
            raise TequilaException("Error in gradient: Objective has no variables")

        for k in variables:
            assert k is not None
            result[k] = grad(objective, k, no_compile=no_compile)
        return result
    else:
        variable = assign_variable(variable)

    if no_compile:
        compiled = objective
    else:
        compiler = Compiler(multitarget=True,
                            trotterized=True,
                            hadamard_power=True,
                            power=True,
                            controlled_phase=True,
                            controlled_rotation=True)

        compiled = compiler(objective, variables=[variable])

    if variable not in compiled.extract_variables():
        raise TequilaException("Error in taking gradient. Objective does not depend on variable {} ".format(variable))

    if isinstance(objective, ExpectationValueImpl):
        return __grad_expectationvalue(E=objective, variable=variable)
    elif objective.is_expectationvalue():
        return __grad_expectationvalue(E=compiled.args[-1], variable=variable)
    elif isinstance(compiled, Objective):
        return __grad_objective(objective=compiled, variable=variable)
    else:
        raise TequilaException("Gradient not implemented for other types than ExpectationValue and Objective.")
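# Usage sketch (illustrative, not part of the module; assumes tequila's public API
# imported as tq, with tq.gates.Ry, tq.paulis.X, tq.ExpectationValue and tq.simulate):
#
#   U = tq.gates.Ry(angle="a", target=0)
#   E = tq.ExpectationValue(U=U, H=tq.paulis.X(0))
#   dEda = grad(E, "a")                         # Objective encoding dE/da
#   dE = grad(E)                                # dict {Variable('a'): dE/da}
#   value = tq.simulate(dEda, variables={"a": 0.5})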
def test_exotic_gradients(gradvar):
    # the 'a' and 'b' cases fail with autograd but pass with jax
    a = Variable('a')
    b = Variable('b')
    c = Variable('c')
    d = Variable('d')
    e = Variable('e')
    f = Variable('f')
    variables = {a: 2.0, b: 3.0, c: 4.0, d: 5.0, e: 6.0, f: 7.0}

    t = c * a ** b + b / c - Objective(args=[c], transformation=np.cos) \
        + f / (d * e) + a * Objective(args=[d], transformation=np.exp) / (f + b) \
        + Objective(args=[e], transformation=np.tanh) + Objective(args=[f], transformation=np.sinc)
    g = grad(t, gradvar)
    if gradvar == 'a':
        assert np.isclose(g(variables),
                          c(variables) * b(variables) * (a(variables) ** (b(variables) - 1.))
                          + np.exp(d(variables)) / (f(variables) + b(variables)))
    if gradvar == 'b':
        assert np.isclose(g(variables),
                          (c(variables) * a(variables) ** b(variables)) * np.log(a(variables))
                          + 1. / c(variables)
                          - a(variables) * np.exp(d(variables)) / (f(variables) + b(variables)) ** 2.0)
    if gradvar == 'c':
        assert np.isclose(g(variables),
                          a(variables) ** b(variables) - b(variables) / c(variables) ** 2. + np.sin(c(variables)))
    if gradvar == 'd':
        assert np.isclose(g(variables),
                          -f(variables) / (np.square(d(variables)) * e(variables))
                          + a(variables) * np.exp(d(variables)) / (f(variables) + b(variables)))
    if gradvar == 'e':
        assert np.isclose(g(variables),
                          2. / (1. + np.cosh(2 * e(variables)))
                          - f(variables) / (d(variables) * e(variables) ** 2.))
    if gradvar == 'f':
        assert np.isclose(g(variables),
                          1. / (d(variables) * e(variables))
                          - a(variables) * np.exp(d(variables)) / (f(variables) + b(variables)) ** 2.
                          + np.cos(np.pi * f(variables)) / f(variables)
                          - np.sin(np.pi * f(variables)) / (np.pi * f(variables) ** 2.))
def __grad_shift_rule(unitary, g, i, variable, hamiltonian):
    '''
    function for getting the gradients of directly differentiable gates. Expects precompiled circuits.
    :param unitary: QCircuit: the QCircuit object containing the gate to be differentiated
    :param g: a parametrized gate: the gate being differentiated
    :param i: Int: the position in unitary at which g appears
    :param variable: Variable or String: the variable with respect to which gate g is being differentiated
    :param hamiltonian: the hamiltonian with respect to which unitary is to be measured, in the case that unitary
        is contained within an ExpectationValue
    :return: an Objective, whose calculation yields the gradient of g w.r.t variable
    '''

    # possibility for override in custom gate construction
    if hasattr(g, "shifted_gates"):
        inner_grad = __grad_inner(g.parameter, variable)
        shifted = g.shifted_gates()
        dOinc = Objective()
        for w, shifted_gate in shifted:
            Ux = unitary.replace_gates(positions=[i], circuits=[shifted_gate])
            wx = w * inner_grad
            Ex = Objective.ExpectationValue(U=Ux, H=hamiltonian)
            dOinc += wx * Ex
        return dOinc

    if not hasattr(g, "eigenvalues_magnitude"):
        raise TequilaException(
            "No shift-rule found for gate {}. Neither shifted_gates nor eigenvalues_magnitude is defined".format(g))

    # neo_a and neo_b are the shifted versions of gate g needed to evaluate its gradient
    shift_a = g._parameter + pi / (4 * g.eigenvalues_magnitude)
    shift_b = g._parameter - pi / (4 * g.eigenvalues_magnitude)
    neo_a = copy.deepcopy(g)
    neo_a._parameter = shift_a
    neo_b = copy.deepcopy(g)
    neo_b._parameter = shift_b

    U1 = unitary.replace_gates(positions=[i], circuits=[neo_a])
    w1 = g.eigenvalues_magnitude * __grad_inner(g.parameter, variable)
    U2 = unitary.replace_gates(positions=[i], circuits=[neo_b])
    w2 = -g.eigenvalues_magnitude * __grad_inner(g.parameter, variable)

    Oplus = ExpectationValueImpl(U=U1, H=hamiltonian)
    Ominus = ExpectationValueImpl(U=U2, H=hamiltonian)
    dOinc = w1 * Objective(args=[Oplus]) + w2 * Objective(args=[Ominus])
    return dOinc
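# Note: the function above consolidates two code paths. A gate class may override the
# shift rule by providing shifted_gates(), which returns (weight, circuit) pairs that are
# summed directly; otherwise the generic two-term rule is applied, using
# eigenvalues_magnitude as the shift constant r in theta +/- pi/(4r).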
def __grad_expectationvalue(E: ExpectationValueImpl, variable: Variable):
    '''
    implements the analytic partial derivative of a unitary as it would appear in an expectation value. See the paper.
    :param E: the ExpectationValue whose gradient should be obtained
    :param variable: the Variable with respect to which differentiation should be performed.
    :return: an Objective, whose calculation yields the gradient of E w.r.t variable
    '''
    hamiltonian = E.H
    unitary = E.U
    assert unitary.verify()

    # fast return if possible
    if variable not in unitary.extract_variables():
        return 0.0

    param_gates = unitary._parameter_map[variable]

    dO = Objective()
    for idx, g in param_gates:
        # failsafe
        if g.is_controlled():
            raise TequilaException("controlled gate in gradient: Compiler was not called. Gate is {}".format(g))
        if not hasattr(g, "eigenvalues_magnitude"):
            raise TequilaException('No shift found for gate {}'.format(g))

        dOinc = __grad_shift_rule(unitary, g, idx, variable, hamiltonian)
        dO += dOinc

    assert dO is not None
    return dO
def get_domain(self, objective: Objective, passive_angles: dict = None) -> typing.List[typing.Dict]:
    """
    return a 'domain' object, for use by GPyOpt.

    This function constructs a list of dictionaries about each variable in objective to optimize over:
    we enforce the domain of 0 to 2 pi, the period of a rotation, since some domain MUST be specified.

    Parameters
    ----------
    objective: Objective:
        the Objective to extract variables from to build the domain.
    passive_angles: dict, optional:
        a dictionary of which angles are passive, in Objective.
        Default: there are none; optimize all angles.

    Returns
    -------
    list of dicts
        the domain object for use by gpyopt.
    """
    op = objective.extract_variables()
    if passive_angles is not None:
        # filter rather than removing from op while iterating over it, which would skip entries
        op = [v for v in op if v not in passive_angles]
    return [{'name': v, 'type': 'continuous', 'domain': (0, 2 * np.pi)} for v in op]
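# Illustrative example of the returned structure: for an objective over variables
# a and b with passive_angles={b: 1.0}, this yields one GPyOpt domain entry per
# active variable, each bounded to one rotation period:
#
#   [{'name': a, 'type': 'continuous', 'domain': (0, 2 * np.pi)}]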
def __grad_expectationvalue(E: ExpectationValueImpl, variable: Variable):
    '''
    implements the analytic partial derivative of a unitary as it would appear in an expectation value. See the paper.
    :param E: the ExpectationValue whose gradient should be obtained
    :param variable: the Variable with respect to which differentiation should be performed.
    :return: an Objective, whose calculation yields the gradient of E w.r.t variable
    '''
    hamiltonian = E.H
    unitary = E.U
    if not unitary.verify():
        raise TequilaException("error in grad_expectationvalue: unitary is {}".format(unitary))

    # fast return if possible
    if variable not in unitary.extract_variables():
        return 0.0

    param_gates = unitary._parameter_map[variable]

    dO = Objective()
    for idx, g in param_gates:
        dOinc = __grad_shift_rule(unitary, g, idx, variable, hamiltonian)
        dO += dOinc

    assert dO is not None
    return dO
def __call__(self, objective: Objective,
             maxiter: int,
             passives: typing.Dict[Variable, numbers.Real] = None,
             samples: int = None,
             backend: str = None,
             noise=None,
             method: str = 'lbfgs') -> GPyOptReturnType:

    # fall back to the optimizer-level sample count if none is given per call
    if samples is None:
        samples = self.samples

    dom = self.get_domain(objective, passives)

    init = {v: np.random.uniform(0, 2 * np.pi) for v in objective.extract_variables()}
    ### O is broken, not using it right now
    O = compile(objective=objective, variables=init, backend=backend,
                noise=noise, samples=samples)
    f = self.construct_function(O, backend, passives, samples, noise_model=noise)

    opt = self.get_object(f, dom, method)
    opt.run_optimization(maxiter)
    if self.save_history:
        self.history.energies = opt.get_evaluations()[1].flatten()
        self.history.angles = [self.redictify(v, objective, passives) for v in opt.get_evaluations()[0]]
    return GPyOptReturnType(energy=opt.fx_opt, angles=self.redictify(opt.x_opt, objective, passives),
                            history=self.history, opt=opt)
def qng_grad_gaussian(unitary, g, i, hamiltonian) -> Objective:
    """
    get the gradient of an expectationvalue of a unitary and a hamiltonian with respect to gaussian gate g.
    THIS variant of the function does not seek out underlying gate parameters; it treats each variable 'as is'.
    This treatment is necessary for the QNG but is incorrect elsewhere.

    Parameters
    ----------
    unitary: QCircuit:
        the QCircuit object containing the gate to be differentiated
    g: parametrized gate:
        the gate being differentiated
    i: int:
        the position in unitary at which g appears.
    hamiltonian: QubitHamiltonian:
        the hamiltonian with respect to which unitary is to be measured, in the case that unitary
        is contained within an ExpectationValue

    Returns
    -------
    Objective:
        the analytical gradient of <U,H> w.r.t g=g(theta_g)
    """

    ### unlike grad_gaussian, this doesn't dig below, into a gate's underlying parametrization.
    ### In other words, if a gate is Rx(y), y=f(x), this gives you back d Rx / dy.
    if not hasattr(g, "shift"):
        raise TequilaException("No shift found for gate {}".format(g))

    # neo_a and neo_b are the shifted versions of gate g needed to evaluate its gradient
    shift_a = g._parameter + numpy.pi / (4 * g.shift)
    shift_b = g._parameter - numpy.pi / (4 * g.shift)
    neo_a = copy.deepcopy(g)
    neo_a._parameter = shift_a
    neo_b = copy.deepcopy(g)
    neo_b._parameter = shift_b

    U1 = unitary.replace_gates(positions=[i], circuits=[neo_a])
    w1 = g.shift
    U2 = unitary.replace_gates(positions=[i], circuits=[neo_b])
    w2 = -g.shift

    Oplus = ExpectationValueImpl(U=U1, H=hamiltonian)
    Ominus = ExpectationValueImpl(U=U2, H=hamiltonian)
    dOinc = w1 * Objective(args=[Oplus]) + w2 * Objective(args=[Ominus])
    return dOinc
def minimize(objective: Objective,
             maxiter: int,
             samples: int = None,
             variables: typing.List = None,
             initial_values: typing.Dict = None,
             backend: str = None,
             noise=None,
             method: str = 'lbfgs') -> GPyOptReturnType:
    """
    Parameters
    ----------
    objective: Objective :
        The tequila objective to optimize
    initial_values: typing.Dict[typing.Hashable, numbers.Real]: (Default value = None):
        Initial values as dictionary of Hashable types (variable keys) and floating point numbers.
        Generates FIXED variables! If not provided, all variables will be optimized.
    variables: typing.List[typing.Hashable] :
        (Default value = None)
        List of Variables to optimize. If None, all variables are optimized, and the passives command is over-ruled.
    samples: int :
        (Default value = None)
        samples/shots to take in every run of the quantum circuits (None activates full wavefunction simulation)
    maxiter: int :
        how many iterations of GPyOpt to run. Note: GPyOpt will override this as it sees fit.
    backend: str :
        (Default value = None)
        Simulator backend, will be automatically chosen if set to None
    noise: NoiseModel :
        (Default value = None)
        a noise model to apply to the circuits of Objective.
    method: str:
        (Default value = 'lbfgs')
        method of acquisition. Allowed arguments are 'lbfgs', 'DIRECT', and 'CMA'

    Returns
    -------
    GPyOptReturnType
        the results of optimization.
    """

    if variables is None:
        passives = None
    else:
        all_vars = objective.extract_variables()
        passives = {}
        for k, v in initial_values.items():
            if k not in variables and k in all_vars:
                passives[k] = v
    optimizer = OptimizerGpyOpt()
    return optimizer(objective=objective, samples=samples, backend=backend,
                     passives=passives, maxiter=maxiter, noise=noise,
                     method=method)
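# Usage sketch (illustrative only; assumes tequila's public API imported as tq):
#
#   U = tq.gates.Ry(angle="a", target=0)
#   E = tq.ExpectationValue(U=U, H=tq.paulis.Z(0))
#   result = minimize(objective=E, maxiter=20, method='lbfgs')
#   print(result.energy, result.angles)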
def preamble(objective: Objective, compile_args: dict = None, input_vars: list = None):
    """
    Helper function for interfaces to ml backends.

    Parameters
    ----------
    objective: Objective:
        the objective to manipulate and compile.
    compile_args: dict, optional:
        a dictionary of args that can be passed as kwargs to tq.compile
    input_vars: list, optional:
        a list of variables of the objective to specify as input, rather than internal weights.

    Returns
    -------
    tuple
        the compiled objective, its compile arguments, its weight variables, dicts for the weight and input
        gradients, and a dictionary that links positions in an array to each variable (parses parameters).
    """

    def var_sorter(e):
        return hash(e.name)

    all_vars = objective.extract_variables()
    all_vars.sort(key=var_sorter)
    compile_args = check_compiler_args(compile_args)

    weight_vars = []
    if input_vars is None:
        input_vars = []
        weight_vars = all_vars
    else:
        input_vars = [assign_variable(v) for v in input_vars]
        for var in all_vars:
            if var not in input_vars:
                weight_vars.append(assign_variable(var))

    init_vals = compile_args['initial_values']
    if init_vals is not None:
        for k in init_vals.keys():
            if assign_variable(k) in input_vars:
                raise TequilaMLException('initial_values contained key {}, '
                                         'which is meant to be an input variable.'.format(k))
        compile_args['initial_values'] = format_variable_dictionary(init_vals)

    comped = compile(objective, **compile_args)
    gradients = get_gradients(objective, compile_args)
    w_grad, i_grad = separate_gradients(gradients, weight_vars=weight_vars, input_vars=input_vars)
    first, second = get_variable_orders(weight_vars, input_vars)
    return comped, compile_args, input_vars, weight_vars, i_grad, w_grad, first, second
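# Usage sketch (illustrative only): an ml interface would typically unpack the preamble
# tuple once at construction time; 'x' below is a hypothetical input variable of the objective.
#
#   comped, compile_args, input_vars, weight_vars, i_grad, w_grad, first, second = \
#       preamble(objective, compile_args=None, input_vars=['x'])
#
# Every variable not listed in input_vars becomes a trainable weight; i_grad and w_grad
# then hold the gradient objectives for the inputs and the weights, respectively.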
def __grad_objective(objective: Objective, variable: Variable):
    args = objective.args
    transformation = objective.transformation
    dO = None

    processed_expectationvalues = {}
    for i, arg in enumerate(args):
        if __AUTOGRAD__BACKEND__ == "jax":
            df = jax.grad(transformation, argnums=i)
        elif __AUTOGRAD__BACKEND__ == "autograd":
            df = jax.grad(transformation, argnum=i)
        else:
            raise TequilaException("Can't differentiate without autograd or jax")

        # We can detect one simple case where the outer derivative is const=1
        if objective.transformation is None:
            outer = 1.0
        else:
            outer = Objective(args=args, transformation=df)

        if hasattr(arg, "U"):
            # save redundancies
            if arg in processed_expectationvalues:
                inner = processed_expectationvalues[arg]
            else:
                inner = __grad_inner(arg=arg, variable=variable)
                processed_expectationvalues[arg] = inner
        else:
            # this means this inner derivative is purely variable dependent
            inner = __grad_inner(arg=arg, variable=variable)

        if inner == 0.0:
            # don't pile up zero expectationvalues
            continue

        if dO is None:
            dO = outer * inner
        else:
            dO = dO + outer * inner

    if dO is None:
        raise TequilaException("caught None in __grad_objective")
    return dO
def get_qng_combos(objective, func=stokes_block, initial_values=None, samples=None,
                   backend=None, device=None, noise=None) -> typing.List[typing.Dict]:
    """
    get all the objects needed to evaluate the qng for some objective; return them in a list of dictionaries.

    Parameters
    ----------
    objective: Objective:
        the Objective whose qng is sought.
    func: callable: (Default = stokes_block):
        the function used to obtain the (blocks of) the qgt. Default uses stokes_block, defined above.
    initial_values: dict, optional:
        a dictionary indicating the initial parameters with which to compile all objectives appearing in the qng.
    samples: int, optional:
        the number of samples with which to compile all objectives appearing in the qng. Default: none.
    backend: str, optional:
        the backend with which to compile all objectives appearing in the qng. Default: pick for you.
    device: optional:
        the device with which to compile all objectives appearing in the qng. Default: no device use or emulation.
    noise: str or NoiseModel, optional:
        the noise model with which to compile all objectives appearing in the qng. Default: no noise.

    Returns
    -------
    list of dicts:
        a list of dictionaries, each entry corresponding to the qng for 1 argument of objective,
        in the order of said objectives.
    """

    combos = []
    vars = objective.extract_variables()
    compiled = compile_multitarget(gate=objective)
    compiled = compile_trotterized_gate(gate=compiled)
    compiled = compile_h_power(gate=compiled)
    compiled = compile_power_gate(gate=compiled)
    compiled = compile_controlled_phase(gate=compiled)
    compiled = compile_controlled_rotation(gate=compiled)
    for i, arg in enumerate(compiled.args):
        if not isinstance(arg, ExpectationValueImpl):
            ### this is a variable, no QNG involved
            mat = QngMatrix([[[1]]])
            vec = CallableVector([__grad_inner(arg, arg)])
            mapping = {0: {v: __grad_inner(arg, v) for v in vars}}
        else:
            ### if the arg is an expectationvalue, we need to build some qngs and mappings!
            blocks = func(arg, initial_values=initial_values, samples=samples,
                          device=device, backend=backend, noise=noise)
            mat = QngMatrix(blocks)

            vec = subvector_procedure(arg, initial_values=initial_values, samples=samples,
                                      device=device, backend=backend, noise=noise)

            mapping = {}
            self_pars = get_self_pars(arg.U)
            for j, p in enumerate(self_pars):
                indict = {}
                for v in p.extract_variables():
                    gi = __grad_inner(p, v)
                    if isinstance(gi, Objective):
                        g = compile_objective(gi, variables=initial_values, samples=samples,
                                              device=device, backend=backend, noise=noise)
                    else:
                        g = gi
                    indict[v] = g
                mapping[j] = indict

        posarg = jax.grad(compiled.transformation, i)
        p = Objective(compiled.args, transformation=posarg)

        pos = compile_objective(p, variables=initial_values, samples=samples,
                                device=device, backend=backend, noise=noise)
        combos.append(qng_dict(arg, mat, vec, mapping, pos))
    return combos
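# Note: downstream, each dict produced here supplies one block of the natural-gradient
# update. Schematically, for the parameters theta of a single argument,
#
#     theta <- theta - eta * G^+ (grad E)(theta)
#
# where G^+ is the (pseudo-)inverse of the metric tensor whose blocks are held in
# QngMatrix, the CallableVector holds the per-gate gradient entries, and the mapping
# re-expresses those entries in terms of the user-facing variables via the chain rule.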
def __call__(self, objective: Objective,
             maxiter=None,
             variables: typing.List[Variable] = None,
             initial_values: typing.Dict[Variable, numbers.Real] = None,
             previous=None,
             phoenics_config=None,
             save_to_file=False,
             file_name=None,
             *args,
             **kwargs):

    active_angles, passive_angles, variables = self.initialize_variables(objective,
                                                                         initial_values=initial_values,
                                                                         variables=variables)

    if maxiter is None:
        maxiter = 10

    obs = []
    bird = self._make_phoenics_object(objective, passive_angles, phoenics_config, *args, **kwargs)
    if previous is not None:
        if type(previous) is str:
            try:
                obs = pickle.load(open(previous, 'rb'))
            except:
                print('failed to load previous observations, which are meant to be a pickle file. Starting fresh.')
        elif type(previous) is list:
            if all([type(k) == dict for k in previous]):
                obs = previous
            else:
                print('previous observations were not in the correct format (list of dicts). Starting fresh.')

    if not (type(file_name) == str or file_name is None):
        raise TequilaException('file_name must be a string, or None!')

    best = None
    best_angles = None

    # avoid multiple compilations
    compiled_objective = compile_objective(objective=objective, backend=self.backend,
                                           backend_options=self.backend_options,
                                           samples=self.samples, noise=self.noise)

    if not self.silent:
        print('phoenics has received')
        print("objective: \n")
        print(objective)
        print("noise model : {}".format(self.noise))
        print("samples     : {}".format(self.samples))
        print("maxiter     : {}".format(maxiter))
        print("variables   : {}".format(objective.extract_variables()))
        print("passive var : {}".format(passive_angles))
        print("backend options {} ".format(self.backend), self.backend_options)
        print('now lets begin')

    for i in range(0, maxiter):
        with warnings.catch_warnings():
            np.testing.suppress_warnings()
            warnings.simplefilter("ignore")
            warnings.filterwarnings("ignore", category=FutureWarning)

            if len(obs) >= 1:
                precs = bird.recommend(observations=obs)
            else:
                precs = bird.recommend()

            runs = []
            recs = self._process_for_sim(precs, passive_angles=passive_angles)

            start = time.time()
            for j, rec in enumerate(recs):
                En = compiled_objective(variables=rec, samples=self.samples, noise=self.noise,
                                        backend_options=self.backend_options)
                runs.append((rec, En))
                if not self.silent:
                    if self.print_level > 2:
                        print("energy = {:+2.8f} , angles=".format(En), rec)
                    else:
                        print("energy = {:+2.8f}".format(En))
            stop = time.time()
            if not self.silent:
                print("Quantum Objective evaluations: {}s Wall-Time".format(stop - start))

            for run in runs:
                angles = run[0]
                E = run[1]
                if best is None:
                    best = E
                    best_angles = angles
                else:
                    if self._minimize:
                        if E < best:
                            best = E
                            best_angles = angles
                    else:
                        if E > best:
                            best = E
                            best_angles = angles

                if self.save_history:
                    self.history.energies.append(E)
                    self.history.angles.append(angles)
                obs.append(self._process_for_phoenics(angles, E, passive_angles=passive_angles))

    if file_name is not None:
        with open(file_name, 'wb') as file:
            pickle.dump(obs, file)

    if not self.silent:
        print("best energy after {} iterations : {:+2.8f}".format(maxiter, best))
    return PhoenicsReturnType(energy=best, angles=best_angles, history=self.history,
                              observations=obs, object=bird)
def test_transform_update():
    a = Variable('a')
    b = Variable('a.')
    t = Objective(transformation=operator.add, args=[a, b])
    variables = {a: 8, b: 1}
    # updating with a new value for a (and an unused key) must propagate into the evaluation
    variables.update({a: 9, "c": 17})
    assert np.isclose(float(t(variables)), 10.0)
def minimize(objective: Objective,
             maxiter: int = None,
             samples: int = None,
             variables: typing.List = None,
             initial_values: typing.Dict = None,
             backend: str = None,
             noise=None,
             previous: typing.Union[str, list] = None,
             phoenics_config: typing.Union[str, typing.Dict] = None,
             save_to_file: bool = False,
             file_name: str = None,
             silent: bool = False,
             *args,
             **kwargs):
    """
    Parameters
    ----------
    objective: Objective :
        The tequila objective to optimize
    initial_values: typing.Dict[typing.Hashable, numbers.Real]: (Default value = None):
        Initial values as dictionary of Hashable types (variable keys) and floating point numbers.
        If given None they will all be set to zero
    variables: typing.List[typing.Hashable] :
        (Default value = None)
        List of Variables to optimize
    samples: int :
        (Default value = None)
        samples/shots to take in every run of the quantum circuits (None activates full wavefunction simulation)
    maxiter: int :
        how many iterations of phoenics to run. Note that this is NOT identical to the number of times the
        circuit will run.
    backend: str :
        (Default value = None)
        Simulator backend, will be automatically chosen if set to None
    noise: NoiseModel :
        (Default value = None)
        a noise model to apply to the circuits of Objective.
    previous:
        (Default value = None)
        Previous phoenics observations. If string, the name of a file from which to load them. Else, a list.
    phoenics_config:
        (Default value = None)
        a pre-made phoenics configuration. If str, the name of a file from which to load it; else, a dictionary.
        Individual keywords of the 'general' section can also be passed down as kwargs
    save_to_file: bool:
        (Default value = False)
        whether or not to save the output of the optimization to an external file
    file_name: str:
        (Default value = None)
        where to save output to, if save_to_file is True.
    kwargs: dict:
        Send down more keywords for single replacements in the phoenics config 'general' section,
        like e.g. batches=5, boosted=True etc

    Returns
    -------
    PhoenicsReturnType
        the results of optimization.
    """
    if variables is None:
        passives = None
    else:
        all_vars = objective.extract_variables()
        passives = {}
        for k, v in initial_values.items():
            if k not in variables and k in all_vars:
                passives[k] = v
    optimizer = OptimizerPhoenics(samples=samples, backend=backend, maxiter=maxiter, silent=silent)
    return optimizer(objective=objective, backend=backend, passives=passives, previous=previous,
                     maxiter=maxiter, noise=noise, samples=samples,
                     phoenics_config=phoenics_config, save_to_file=save_to_file, file_name=file_name,
                     *args, **kwargs)
def __call__(self, objective: Objective,
             maxiter: int = None,
             passives: typing.Dict[Variable, numbers.Real] = None,
             samples: int = None,
             backend: str = None,
             noise=None,
             previous=None,
             phoenics_config=None,
             save_to_file=False,
             file_name=None,
             *args,
             **kwargs):

    backend_options = {}
    if 'backend_options' in kwargs:
        backend_options = kwargs['backend_options']

    if maxiter is None:
        maxiter = 10

    bird = self._make_phoenics_object(objective, passives, phoenics_config, *args, **kwargs)
    if previous is not None:
        if type(previous) is str:
            try:
                obs = pickle.load(open(previous, 'rb'))
            except:
                print('failed to load previous observations, which are meant to be a pickle file. '
                      'Please try again or seek assistance. Starting fresh.')
                obs = []
        elif type(previous) is list:
            if all([type(k) == dict for k in previous]):
                obs = previous
            else:
                print('previous observations were not in the correct format (list of dicts). '
                      'Are you sure you gave me the right info? Starting fresh.')
                obs = []
    else:
        obs = []

    if save_to_file is True:
        if type(file_name) is str:
            pass
        elif file_name is None:
            raise TequilaException('You have asked me to save phoenics observations without telling me '
                                   'where to do so! please provide a file_name')
        else:
            raise TequilaException('file_name must be a string!')

    ### this line below just gets the damn compiler to run, since that argument is necessary
    init = {key: np.pi for key in objective.extract_variables()}

    best = None
    best_angles = None

    # avoid multiple compilations
    compiled_objective = compile_objective(objective=objective, backend=backend, samples=samples, noise=noise)

    if not self.silent:
        print('phoenics has received')
        print("objective: \n")
        print(objective)
        print("noise model : {}".format(noise))
        print("samples     : {}".format(samples))
        print("maxiter     : {}".format(maxiter))
        print("variables   : {}".format(objective.extract_variables()))
        print("passive var : {}".format(passives))
        print("backend options {} ".format(backend), backend_options)
        print('now lets begin')

    for i in range(0, maxiter):
        with warnings.catch_warnings():
            np.testing.suppress_warnings()
            warnings.simplefilter("ignore")
            warnings.filterwarnings("ignore", category=FutureWarning)

            if len(obs) >= 1:
                precs = bird.recommend(observations=obs)
            else:
                precs = bird.recommend()

            runs = []
            recs = self._process_for_sim(precs, passives=passives)

            start = time.time()
            for j, rec in enumerate(recs):
                En = compiled_objective(variables=rec, samples=samples, noise=noise, **backend_options)
                runs.append((rec, En))
                if not self.silent:
                    print("energy = {:+2.8f} , angles=".format(En), rec)
            stop = time.time()
            if not self.silent:
                print("Quantum Objective evaluations: {}s Wall-Time".format(stop - start))

            for run in runs:
                angles = run[0]
                E = run[1]
                if best is None:
                    best = E
                    best_angles = angles
                else:
                    if self._minimize:
                        if E < best:
                            best = E
                            best_angles = angles
                    else:
                        if E > best:
                            best = E
                            best_angles = angles

                if self.save_history:
                    self.history.energies.append(E)
                    self.history.angles.append(angles)
                obs.append(self._process_for_phoenics(angles, E, passives=passives))

    if save_to_file is True:
        with open(file_name, 'wb') as file:
            pickle.dump(obs, file)

    if not self.silent:
        print("best energy after {} iterations : {:+2.8f}".format(maxiter, best))
    return PhoenicsReturnType(energy=best, angles=best_angles, history=self.history, observations=obs)
def __call__(self, objective: Objective,
             maxiter=None,
             variables: typing.List[Variable] = None,
             initial_values: typing.Dict[Variable, numbers.Real] = None,
             previous=None,
             phoenics_config=None,
             file_name=None,
             *args,
             **kwargs):
    """
    Perform optimization with phoenics.

    Parameters
    ----------
    objective: Objective
        the objective to optimize.
    maxiter: int: (Default value = None)
        if not None, overwrite the init maxiter with new number.
    variables: list: (Default value = None)
        which variables to optimize over. If None: all of the variables in objective are used.
    initial_values: dict: (Default value = None)
        an initial point to begin optimization from. Random, if None.
    previous:
        previous observations, formatted for phoenics, to use in optimization. For use by advanced users.
    phoenics_config:
        a config for a phoenics object.
    file_name:
        a file to which phoenics observations are saved, if not None.
    args
    kwargs

    Returns
    -------
    PhoenicsResults:
        the results of optimization by phoenics.
    """

    objective = objective.contract()
    active_angles, passive_angles, variables = self.initialize_variables(objective,
                                                                         initial_values=initial_values,
                                                                         variables=variables)

    if maxiter is None:
        maxiter = 10

    obs = []
    bird = self._make_phoenics_object(objective, passive_angles, phoenics_config, *args, **kwargs)
    if previous is not None:
        if type(previous) is str:
            try:
                obs = pickle.load(open(previous, 'rb'))
            except:
                print('failed to load previous observations, which are meant to be a pickle file. Starting fresh.')
        elif type(previous) is list:
            if all([type(k) == dict for k in previous]):
                obs = previous
            else:
                print('previous observations were not in the correct format (list of dicts). Starting fresh.')

    if not (type(file_name) == str or file_name is None):
        raise TequilaException('file_name must be a string, or None. Received {}'.format(type(file_name)))

    best = None
    best_angles = None

    # avoid multiple compilations
    compiled_objective = compile_objective(objective=objective, backend=self.backend, device=self.device,
                                           samples=self.samples, noise=self.noise)

    if not self.silent:
        print('phoenics has received')
        print("objective: \n")
        print(objective)
        print("noise model : {}".format(self.noise))
        print("samples     : {}".format(self.samples))
        print("maxiter     : {}".format(maxiter))
        print("variables   : {}".format(objective.extract_variables()))
        print("passive var : {}".format(passive_angles))
        print('now lets begin')

    for i in range(0, maxiter):
        with warnings.catch_warnings():
            np.testing.suppress_warnings()
            warnings.simplefilter("ignore")
            warnings.filterwarnings("ignore", category=FutureWarning)

            precs = bird.recommend(observations=obs)

            runs = []
            recs = self._process_for_sim(precs, passive_angles=passive_angles)

            start = time.time()
            for j, rec in enumerate(recs):
                En = compiled_objective(variables=rec, samples=self.samples, noise=self.noise)
                runs.append((rec, En))
                if not self.silent:
                    if self.print_level > 2:
                        print("energy = {:+2.8f} , angles=".format(En), rec)
                    else:
                        print("energy = {:+2.8f}".format(En))
            stop = time.time()
            if not self.silent:
                print("Quantum Objective evaluations: {}s Wall-Time".format(stop - start))

            for run in runs:
                angles = run[0]
                E = run[1]
                if best is None:
                    best = E
                    best_angles = angles
                else:
                    if self._minimize:
                        if E < best:
                            best = E
                            best_angles = angles
                    else:
                        if E > best:
                            best = E
                            best_angles = angles

                if self.save_history:
                    self.history.energies.append(E)
                    self.history.angles.append(angles)
                obs.append(self._process_for_phoenics(angles, E, passive_angles=passive_angles))

    if file_name is not None:
        with open(file_name, 'wb') as file:
            pickle.dump(obs, file)

    if not self.silent:
        print("best energy after {} iterations : {:+2.8f}".format(maxiter, best))

    return PhoenicsResults(energy=best, variables=best_angles, history=self.history,
                           observations=obs, phoenics_instance=bird)
def get_qng_combos(objective, initial_values=None, samples=None, backend=None,
                   backend_options=None, noise=None):
    combos = []
    vars = objective.extract_variables()
    compiled = compile_multitarget(gate=objective)
    compiled = compile_trotterized_gate(gate=compiled)
    compiled = compile_h_power(gate=compiled)
    compiled = compile_power_gate(gate=compiled)
    compiled = compile_controlled_phase(gate=compiled)
    compiled = compile_controlled_rotation(gate=compiled)
    for i, arg in enumerate(compiled.args):
        if not isinstance(arg, ExpectationValueImpl):
            ### this is a variable, no QNG involved
            mat = QngMatrix([[[1]]])
            vec = CallableVector([__grad_inner(arg, arg)])
            mapping = {0: {v: __grad_inner(arg, v) for v in vars}}
        else:
            ### if the arg is an expectationvalue, we need to build some qngs and mappings!
            blocks = qng_metric_tensor_blocks(arg, initial_values=initial_values, samples=samples,
                                              backend=backend, noise=noise, backend_options=backend_options)
            mat = QngMatrix(blocks)

            vec = subvector_procedure(arg, initial_values=initial_values, samples=samples,
                                      backend=backend, noise=noise, backend_options=backend_options)

            mapping = {}
            self_pars = get_self_pars(arg.U)
            for j, p in enumerate(self_pars):
                indict = {}
                for v in p.extract_variables():
                    gi = __grad_inner(p, v)
                    if isinstance(gi, Objective):
                        g = compile_objective(gi, variables=initial_values, samples=samples,
                                              backend=backend, noise=noise, backend_options=backend_options)
                    else:
                        g = gi
                    indict[v] = g
                mapping[j] = indict

        posarg = jax.grad(compiled.transformation, argnums=i)
        p = Objective(compiled.args, transformation=posarg)

        pos = compile_objective(p, variables=initial_values, samples=samples, backend=backend,
                                noise=noise, backend_options=backend_options)
        combos.append(qng_dict(arg, mat, vec, mapping, pos))
    return combos