def minimalCommutatorDistance(c1, c2, sdp=None, relaxation_level=2):
    """
    Minimise || [D, X] - 1 || over operators bounded by ||X|| <= c1 and
    ||D|| <= c2, via an NPA-style SDP relaxation.

    If *sdp* is None a fresh relaxation of the given level is built;
    otherwise the supplied relaxation is reused and only its inequality
    constraints are refreshed (much cheaper than rebuilding).

    Returns a pair (distance, sdp); distance is None when the solver does
    not report an optimal status.
    """
    if sdp is None:
        # Fresh problem: declare the two operators.
        D = ncp.generate_operators('D', 1)[0]
        X = ncp.generate_operators('X', 1)[0]
        # The relaxation drops the constant +1 term of the expanded
        # objective, so it is re-added to sdp.primal at the end.
        obj = Dagger(commutator(D, X) - 1) * (commutator(D, X) - 1)
        inequality_cons = [c1**2 - Dagger(X) * X >= 0,
                           c2**2 - Dagger(D) * D >= 0]
        sdp = ncp.SdpRelaxation([D, X], normalized=True)
        sdp.get_relaxation(level=relaxation_level,
                           objective=obj,
                           inequalities=inequality_cons)
    else:
        # Reuse the relaxation that was passed in: recover the operators
        # from its monomial basis and swap in the new bounds.
        D = sdp.monomial_sets[0][1]
        X = sdp.monomial_sets[0][2]
        inequality_cons = [c1**2 - Dagger(X) * X >= 0,
                           c2**2 - Dagger(D) * D >= 0]
        sdp.process_constraints(inequalities=inequality_cons)

    # Solve and report.
    sdp.solve(solver=SOLVER_NAME, solverparameters=SOLVER_EXE)
    if sdp.status != 'optimal':
        return None, sdp
    # +1 restores the constant term dropped from the objective.
    return sqrt(sdp.primal + 1), sdp
def isFeasible(c1, c2, eps, sdp=None, relaxation_level=2):
    """
    Feasibility check: can operators D, X satisfy ||X|| <= c1,
    ||D|| <= c2 and || [D, X] - 1 ||^2 <= eps^2 at the given relaxation
    level?

    If *sdp* is None a fresh relaxation is built; otherwise the supplied
    one is reused with its inequality constraints refreshed.

    Returns (feasible, sdp) where feasible is True iff the solver status
    is 'optimal'.
    """
    if sdp is None:
        D = ncp.generate_operators('D', 1)[0]
        X = ncp.generate_operators('X', 1)[0]
        # Constant objective: we only care about feasibility.
        obj = 1.0
        comm = commutator(D, X)
        # Third constraint is eps^2 - ||[D,X] - 1||^2 >= 0, written out
        # term by term.
        inequality_cons = [
            c1**2 - Dagger(X) * X >= 0,
            c2**2 - Dagger(D) * D >= 0,
            eps**2 - Dagger(comm) * comm + Dagger(comm) + comm - 1 >= 0,
        ]
        sdp = ncp.SdpRelaxation([D, X], normalized=True)
        sdp.get_relaxation(level=relaxation_level,
                           objective=obj,
                           inequalities=inequality_cons)
    else:
        # Reuse the passed relaxation; recover the operators from its
        # monomial basis and only update the constraints.
        D = sdp.monomial_sets[0][1]
        X = sdp.monomial_sets[0][2]
        comm = commutator(D, X)
        inequality_cons = [
            c1**2 - Dagger(X) * X >= 0,
            c2**2 - Dagger(D) * D >= 0,
            eps**2 - Dagger(comm) * comm + Dagger(comm) + comm - 1 >= 0,
        ]
        sdp.process_constraints(inequalities=inequality_cons)

    # Solve and report feasibility.
    sdp.solve(solver=SOLVER_NAME, solverparameters=SOLVER_EXE)
    return (sdp.status == 'optimal'), sdp
def _relax(self):
    """
    Creates the sdp relaxation object from ncpol2sdpa.

    Builds one measurement decomposition (Alice + Bob operator sets and
    their projective constraints) per generating outcome, assembles the
    normalisation and game-score moment equalities, constructs the
    guessing-probability objective, and stores the resulting relaxation
    in self._sdp.
    """
    if self.solver is None:
        self.solver = self.DEFAULT_SOLVER_PATH

    self._eq_cons = []      # equality constraints
    self._proj_cons = {}    # projective constraints
    self._A_ops = []        # Alice's operators
    self._B_ops = []        # Bob's operators
    self._obj = 0           # Objective function
    self._obj_const = ''    # Extra objective normalisation constant
    self._sdp = None        # SDP object

    # Creating the operator constraints.
    # Need as many decompositions as there are generating outcomes.
    num_decomps = np.prod(self.generation_output_size)
    nrm = ''
    for k in range(num_decomps):
        self._A_ops += [
            ncp.generate_measurements(self.io_config[0], 'A' + str(k) + '_')
        ]
        self._B_ops += [
            ncp.generate_measurements(self.io_config[1], 'B' + str(k) + '_')
        ]
        self._proj_cons.update(
            ncp.projective_measurement_constraints(self._A_ops[k],
                                                   self._B_ops[k]))
        # Also building a normalisation string for the next step; the
        # token 'k[0,0]' addresses the identity entry of the k-th
        # moment-matrix block.
        nrm += '+' + str(k) + '[0,0]'

    # Adding the constraints.
    # Normalisation constraint: the block identities must sum to 1.
    self._eq_cons.append(nrm + '-1')

    # Create the game expressions (summed over all decompositions).
    self._base_constraint_expressions = []
    for game in self.games:
        tmp_expr = 0
        for k in range(num_decomps):
            tmp_expr += -ncp.define_objective_with_I(
                game._cgmatrix, self._A_ops[k], self._B_ops[k])
        self._base_constraint_expressions.append(tmp_expr)

    # Specify the scores for these expressions including any shifts.
    for i, game in enumerate(self.games):
        # We must account for overshifting in the score coming from the
        # decomposition.
        self._eq_cons.append(
            self._base_constraint_expressions[i] - game.score
            - game._cgshift * (num_decomps - 1))

    self._obj, self._obj_const = guessingProbabilityObjectiveFunction(
        self.io_config, self.generation_inputs, self._A_ops, self._B_ops)

    # Initialising SDP.
    # Fix: the operator list was hard-coded for exactly four
    # decompositions (indices 0..3), which breaks for any other
    # generation_output_size; build it for however many were created.
    ops = [ncp.flatten([self._A_ops[k], self._B_ops[k]])
           for k in range(num_decomps)]
    self._sdp = ncp.SdpRelaxation(ops, verbose=self.verbose,
                                  normalized=False)
    self._sdp.get_relaxation(level=self._relaxation_level,
                             momentequalities=self._eq_cons,
                             objective=self._obj,
                             substitutions=self._proj_cons,
                             extraobjexpr=self._obj_const)
# We include some extra monomials in the relaxation to boost rates.
# Same element order as the original nested loops: for each v, every
# a*b*v is immediately followed by the matching a*b*Dagger(v).
extra_monos = [a * b * w
               for v in V1 + V2
               for Ax in A for a in Ax
               for By in B for b in By
               for w in (v, Dagger(v))]

# Objective function
obj = A[0][0] * (V1[0] + Dagger(V1[0])) / 2.0 \
    + A[0][1] * (V1[1] + Dagger(V1[1])) / 2.0

# Build and solve the relaxation (objective negated: solver minimises).
ops = ncp.flatten([A, B, V1, V2])
sdp = ncp.SdpRelaxation(ops, verbose=1, normalized=True, parallel=0)
sdp.get_relaxation(level=LEVEL,
                   equalities=operator_equalities,
                   inequalities=operator_inequalities,
                   momentequalities=moment_equalities,
                   momentinequalities=moment_inequalities,
                   objective=-obj,
                   substitutions=substitutions,
                   extramonomials=extra_monos)
sdp.solve('mosek')

print(
    f"For detection efficiency {test_eta} the system {test_sys} achieves a DI-QKD rate of {rate(sdp,test_sys,test_eta)}"
)