def evaluateSingleOutput(self, epsilon, network, prediction, output):
    """Check whether weight perturbations bounded by `epsilon` can make
    output neuron `output` tie or beat neuron `prediction`.

    Only the epsilon variables in the columns feeding the two outputs of
    interest may vary in [-epsilon, epsilon]; every other epsilon variable
    is pinned to 0. The sum of absolute perturbations is additionally
    capped at `epsilon` via a LE equation.

    Returns whatever ``network.solve(verbose=True)`` returns
    (typically (vals, stats)).
    """
    outputVars = network.outputVars[0]
    abs_epsilons = []
    # Hoisted out of the inner loop: O(1) set membership instead of
    # rebuilding a 2-element list per iteration.
    active_cols = {prediction, output}
    for k in network.matMulLayers.keys():
        n, m = network.matMulLayers[k]['vals'].shape
        for i in range(n):
            for j in range(m):
                epsilon_var = network.epsilons[i][j]
                if j in active_cols:
                    # Perturbable weight: bound it and track |epsilon_var|.
                    network.setUpperBound(epsilon_var, epsilon)
                    network.setLowerBound(epsilon_var, -epsilon)
                    abs_epsilons.append(self.epsilonABS(network, epsilon_var))
                else:
                    # Freeze all other perturbations at zero.
                    network.setUpperBound(epsilon_var, 0)
                    network.setLowerBound(epsilon_var, 0)

    # L1-style budget: sum of |epsilon_var| over active columns <= epsilon.
    e = MarabouUtils.Equation(
        EquationType=MarabouUtils.MarabouCore.Equation.LE)
    for abs_var in abs_epsilons:
        e.addAddend(1, abs_var)
    e.setScalar(epsilon)
    network.addEquation(e)

    # Adversarial condition: outputVars[prediction] - outputVars[output] <= 0.
    MarabouUtils.addInequality(
        network, [outputVars[prediction], outputVars[output]], [1, -1], 0)
    return network.solve(verbose=True)
def evaluateSingleOutput(self, epsilon, network, prediction, output):
    """Bound every matMul-layer epsilon variable in [-epsilon, epsilon],
    require output neuron `output` to tie or beat neuron `prediction`,
    and hand the query to the solver.

    Returns whatever ``network.solve()`` returns.
    """
    outputVars = network.outputVars[0]
    for layer in network.matMulLayers.values():
        rows, cols = layer['vals'].shape
        eps_vars = layer['epsilons']
        for r in range(rows):
            for c in range(cols):
                var = eps_vars[r][c]
                network.setUpperBound(var, epsilon)
                network.setLowerBound(var, -epsilon)
    # prediction - output <= 0, i.e. `output` reaches `prediction`.
    MarabouUtils.addInequality(
        network, [outputVars[prediction], outputVars[output]], [1, -1], 0)
    return network.solve()
def findEpsilon(self, network, prediction):
    """For every sample, constrain the runner-up output to tie or beat the
    argmax output, solve, and read back the resulting output values.

    Returns (solver results, argmax indices, runner-up indices,
    solved output matrix).
    """
    outputVars = network.outputVars
    # Outputs ranked descending per sample: column 0 = argmax, column 1 = runner-up.
    ranked = np.flip(np.argsort(prediction, axis=1), axis=1)
    num_samples = outputVars.shape[0]
    num_outputs = outputVars.shape[1]
    for row in range(num_samples):
        best = ranked[row][0]
        runner_up = ranked[row][1]
        # best - runner_up <= 0: the top prediction no longer dominates.
        MarabouUtils.addInequality(
            network,
            [outputVars[row][best], outputVars[row][runner_up]],
            [1, -1], 0)
    results = self.getNetworkSolution(network)
    # results[2] maps Marabou variables to solved values.
    newOutput = np.array(
        [[results[2][outputVars[row][col]] for col in range(num_outputs)]
         for row in range(num_samples)])
    return results, ranked[:, 0], ranked[:, 1], newOutput
def evaluateEpsilon(self, epsilon, network, prediction):
    """Test whether an epsilon-bounded weight perturbation can swap each
    sample's top-2 outputs.

    Epsilon variables in columns feeding any sample's top-2 output neurons
    may vary in [-epsilon, epsilon]; all others are pinned to 0. The sum
    of absolute active perturbations is capped at `epsilon`.

    Returns (sat|unsat, solver stats, runner-up index per sample).
    """
    outputVars = network.outputVars
    abs_epsilons = []
    # Outputs ranked descending per sample: col 0 = argmax, col 1 = runner-up.
    predIndices = np.flip(np.argsort(prediction, axis=1), axis=1)
    preds = [(predIndices[i][0], predIndices[i][1])
             for i in range(outputVars.shape[0])]
    # Hoisted out of the inner loop — the original rebuilt this list on
    # every (i, j) iteration; a set gives O(1) membership tests.
    active_cols = set(chain.from_iterable(preds))
    n, m = network.epsilons.shape
    for i in range(n):
        for j in range(m):
            epsilon_var = network.epsilons[i][j]
            if j in active_cols:
                network.setUpperBound(epsilon_var, epsilon)
                network.setLowerBound(epsilon_var, -epsilon)
                abs_epsilons.append(self.epsilonABS(network, epsilon_var))
            else:
                # Freeze perturbations that cannot affect the top-2 outputs.
                network.setUpperBound(epsilon_var, 0)
                network.setLowerBound(epsilon_var, 0)

    # L1-style budget: sum of |epsilon_var| over active columns <= epsilon.
    e = MarabouUtils.Equation(
        EquationType=MarabouUtils.MarabouCore.Equation.LE)
    for abs_var in abs_epsilons:
        e.addAddend(1, abs_var)
    e.setScalar(epsilon)
    network.addEquation(e)

    # Per sample: argmax - runner_up <= 0, i.e. the prediction can flip.
    for i in range(outputVars.shape[0]):
        MarabouUtils.addInequality(
            network,
            [outputVars[i][preds[i][0]], outputVars[i][preds[i][1]]],
            [1, -1], 0)

    options = Marabou.createOptions(numWorkers=6, dnc=False)
    stats = network.solve(verbose=False, options=options)
    newOut = predIndices[:, 1]
    if stats[0]:
        return sat, stats, newOut
    else:
        return unsat, stats, newOut
def evaluateEpsilon(self, epsilon, network, prediction):
    """Bound every epsilon variable in [-epsilon, epsilon], require each
    sample's runner-up output to tie or beat its argmax, and solve.

    Returns (sat|unsat, solver stats, runner-up index per sample).
    """
    outputVars = network.outputVars
    rows, cols = network.epsilons.shape
    for r in range(rows):
        for c in range(cols):
            var = network.epsilons[r][c]
            network.setUpperBound(var, epsilon)
            network.setLowerBound(var, -epsilon)
    # Outputs ranked descending per sample: col 0 = argmax, col 1 = runner-up.
    ranked = np.flip(np.argsort(prediction, axis=1), axis=1)
    for sample in range(outputVars.shape[0]):
        top = ranked[sample][0]
        second = ranked[sample][1]
        # top - second <= 0: the original prediction no longer dominates.
        MarabouUtils.addInequality(
            network,
            [outputVars[sample][top], outputVars[sample][second]],
            [1, -1], 0)
    stats = network.solve(verbose=False)
    newOut = ranked[:, 1]
    return (sat, stats, newOut) if stats[0] else (unsat, stats, newOut)
def evaluateEpsilon(self, epsilon, network):
    """Test whether an epsilon-bounded perturbation of every weight can
    break the classification margins.

    All epsilon variables may vary in [-epsilon, epsilon], their absolute
    sum is capped at `epsilon`, and each "good" output (0, 1) must beat
    each "bad" output (2, 3, 4) by at least ``self.correct_diff``.

    Returns (sat|unsat, solver result tuple).
    """
    outputVars = network.outputVars
    abs_epsilons = []
    n, m = network.epsilons.shape
    for i in range(n):
        for j in range(m):
            epsilon_var = network.epsilons[i][j]
            network.setUpperBound(epsilon_var, epsilon)
            network.setLowerBound(epsilon_var, -epsilon)
            abs_epsilons.append(self.epsilonABS(network, epsilon_var))

    # L1-style budget: sum of |epsilon_var| <= epsilon.
    e = MarabouUtils.Equation(
        EquationType=MarabouUtils.MarabouCore.Equation.LE)
    for abs_var in abs_epsilons:
        e.addAddend(1, abs_var)
    e.setScalar(epsilon)
    network.addEquation(e)

    # Same six margin constraints as before, in the same order:
    # (0,2), (0,3), (0,4), (1,2), (1,3), (1,4).
    for i in range(outputVars.shape[0]):
        for good in (0, 1):
            for bad in (2, 3, 4):
                MarabouUtils.addInequality(
                    network,
                    [outputVars[i][good], outputVars[i][bad]],
                    [1, -1], self.correct_diff)

    vals = network.solve(verbose=True)
    if vals[0]:
        return sat, vals
    return unsat, vals
def findEpsilon(self, network):
    """Add the margin constraints — each "good" output (0, 1) must beat
    each "bad" output (2, 3, 4) by at least ``self.correct_diff`` — then
    delegate to ``self.getNetworkSolution``.

    Returns the value of ``self.getNetworkSolution(network)``.
    """
    outputVars = network.outputVars
    for row in range(outputVars.shape[0]):
        # Constraint order matches the original expansion:
        # (0,2), (0,3), (0,4), (1,2), (1,3), (1,4).
        for good in (0, 1):
            for bad in (2, 3, 4):
                MarabouUtils.addInequality(
                    network,
                    [outputVars[row][good], outputVars[row][bad]],
                    [1, -1], self.correct_diff)
    return self.getNetworkSolution(network)
# Input bounds: feature 0 uses its data-derived range, features 1-4 are
# clipped to [-0.5, 0.5].
input_bounds = [
    (-0.3284228772, 0.6798577687),
    (-0.5, 0.5),
    (-0.5, 0.5),
    (-0.5, 0.5),
    (-0.5, 0.5),
]
for idx, (lo, hi) in enumerate(input_bounds):
    net1.setLowerBound(inputVars[idx], lo)
    net1.setUpperBound(inputVars[idx], hi)

# Property: output 3 is minimal, i.e. outputVars[3] <= outputVars[j] for j != 3.
outputVars = net1.outputVars[0]
for other in (0, 1, 2, 4):
    MarabouUtils.addInequality(
        net1, [outputVars[3], outputVars[other]], [1, -1], 0)

options = Marabou.createOptions(dnc=True, verbosity=0, initialDivides=2)
vals, stats = net1.solve(options=options)
if vals:
    print('SAT')
    # Context manager guarantees the file is closed — the original handle
    # was opened with open() and never closed.
    with open('./data/{}_input_small_range.csv'.format(model_name), 'w') as out_file:
        out_file.write('{},{},{},{},{}\n'.format(
            vals[inputVars[0]], vals[inputVars[1]], vals[inputVars[2]],
            vals[inputVars[3]], vals[inputVars[4]]))
def verify(self, network: networks.NeuralNetwork, prop: Property) -> (bool, typing.Optional[Tensor.Tensor]):
    """
    Verify that the neural network of interest satisfies the property
    given as argument using the Marabou verification tool.

    Parameters
    ----------
    network : NeuralNetwork
        The neural network to verify.
    prop : Property
        The property which the neural network must satisfy.

    Returns
    ----------
    (bool, Optional[Tensor])
        True and None if the neural network satisfies the property,
        False and the counterexample otherwise.

        NOTE(review): the code below actually returns True *together with*
        a counterexample when Marabou finds a solution to the negated
        query — this looks inconsistent with the description above;
        confirm the intended convention with callers.
    """
    if isinstance(prop, SMTLIBProperty):
        targeted, bounds, target = utilities.parse_linf_robustness_smtlib(prop.smtlib_path)
    elif isinstance(prop, LocalRobustnessProperty):
        targeted = prop.targeted
        target = prop.target
        # Build the L-inf ball around the data point, clipped to the
        # declared input bounds.
        bounds = []
        for i in range(len(prop.data)):
            lb = max(prop.data[i] - prop.epsilon, prop.bounds[i][0])
            ub = min(prop.data[i] + prop.epsilon, prop.bounds[i][1])
            bounds.append((lb, ub))
    else:
        raise NotImplementedError

    # Only targeted robustness queries are supported.
    if not targeted:
        raise NotImplementedError

    # Round-trip through ONNX so Marabou can load the network.
    onnx_rep = cv.ONNXConverter().from_neural_network(network)
    onnx.save_model(onnx_rep.onnx_network, "temp/onnx_network.onnx")
    marabou_onnx_net = Marabou.read_onnx("temp/onnx_network.onnx")
    os.remove("temp/onnx_network.onnx")

    input_vars = marabou_onnx_net.inputVars[0][0]
    output_vars = marabou_onnx_net.outputVars

    assert (len(bounds) == len(input_vars))
    for i in range(len(input_vars)):
        marabou_onnx_net.setLowerBound(input_vars[i], bounds[i][0])
        marabou_onnx_net.setUpperBound(input_vars[i], bounds[i][1])

    # Negated query: some non-target output ties or beats the target.
    for i in range(len(output_vars)):
        if i != target:
            MarabouUtils.addInequality(
                marabou_onnx_net, [output_vars[i], output_vars[target]],
                [1, -1], 0)

    options = MarabouCore.Options()
    vals, stats = marabou_onnx_net.solve(options=options)

    counterexample = None
    if not vals:
        sat = False
    else:
        sat = True
        # vals maps Marabou variables to solved values; keep them in
        # variable order as a numpy array.
        counterexample = np.array(list(vals.values()))
    return sat, counterexample