def test_Worst_approx(self):
    """WorstApprox with no previously-measured queries returns a 2-D
    query matrix whose column count matches the 16-cell 1-D domain."""
    data = np.random.randint(0, 1000, size=self.domain_shape_1D)
    estimate = np.random.randint(0, 1000, size=self.domain_shape_1D)
    rng = np.random.RandomState(10)
    # Empty list = nothing measured yet; 0.1 is the selection epsilon.
    selector = pselection.WorstApprox(self.W, [], estimate, 0.1)
    picked = selector.select(data, rng)
    # Must be a matrix (2-D) over the 16-element domain.
    self.assertEqual(len(picked.shape), 2)
    self.assertEqual(picked.shape[1], 16)
def test_Worst_approx(self):
    """Repeated WorstApprox selection accumulates one new nonzero query
    per round when the prior selections are fed back in."""
    data = np.random.randint(0, 1000, size=self.domain_shape_1D)
    estimate = np.random.randint(0, 1000, size=self.domain_shape_1D)
    rng = np.random.RandomState(10)
    # Round 1: nothing measured yet, so pass an all-zero sparse matrix
    # of the workload's shape.
    selector = pselection.WorstApprox(
        self.W, sparse.csr_matrix(self.W.shape), estimate, 0.1)
    picked = selector.select(data, rng)
    self.assertEqual(len(picked.shape), 2)
    self.assertEqual(picked.shape[1], 16)
    # Exactly one query selected so far.
    self.assertEqual(len(picked.nonzero()[0]), 1)
    # Round 2: pass the round-1 selection back as the measured set; the
    # accumulated matrix should now hold a second distinct nonzero entry.
    selector = pselection.WorstApprox(self.W, picked, estimate, 0.1)
    picked += selector.select(data, rng)
    self.assertEqual(len(picked.shape), 2)
    self.assertEqual(picked.shape[1], 16)
    self.assertEqual(len(picked.nonzero()[0]), 2)
def Run(self, W, x, eps, seed):
    """Iterative mechanism: per round, privately select the worst-approximated
    query (WorstApprox), measure it with Laplace noise, and re-infer the data
    estimate via regularized non-negative least squares over ALL history.

    The per-round budget eps/rounds is split: `ratio` for selection,
    (1 - ratio) for measurement.

    NOTE(review): W is assumed to be a workload accepted by
    pselection.WorstApprox, and x a (possibly multi-dim) count vector —
    confirm against callers.
    """
    x = x.flatten()
    prng = np.random.RandomState(seed)
    domain_size = np.prod(self.domain_shape)
    # Start with a uniform estimation of x
    x_hat = np.array([self.data_scale / float(domain_size)] * domain_size)
    # non-zero regs to avoid super long convergence time.
    nnls = inference.NonNegativeLeastSquares(l1_reg=1e-6, l2_reg=1e-6)
    measuredQueries = []   # indices of queries already selected (fed back to WorstApprox)
    M_history = []         # all measurement matrices so far
    y_history = []         # corresponding noisy answers
    noise_scales = []      # per-measurement noise scale for weighting in NNLS
    if self.total_noise_scale != 0:
        # Optionally anchor inference with a (pseudo-)measured total count.
        M_history.append(workload.Total(domain_size))
        y_history.append(np.array([self.data_scale]))
        noise_scales.append(self.total_noise_scale)
    for i in range(1, self.rounds+1):
        eps_round = eps / float(self.rounds)
        # SW + SH2
        worst_approx = pselection.WorstApprox(W, measuredQueries, x_hat, eps_round * self.ratio)
        W_next = worst_approx.select(x, prng)
        measuredQueries.append(W_next.mwem_index)
        # Refine the selected query into i equi-width intervals.
        M = selection.AddEquiWidthIntervals(W_next, i).select()
        laplace = measurement.Laplace(M, eps_round * (1-self.ratio))
        y = laplace.measure(x, prng)
        # default use history
        M_history.append(M)
        y_history.append(y)
        noise_scales.append(laplace_scale_factor(M, eps_round * (1-self.ratio)))
        x_hat = nnls.infer(M_history, y_history, noise_scales)
    return x_hat
def Run(self, W, x, eps, seed):
    """Dense-matrix variant: WorstApprox selection + Laplace measurement each
    round, then NNLS inference (method='new') either over the stacked history
    (with an optional total-count anchor) or the current round only.

    NOTE(review): `get_matrix` and `laplace_scale_factor` are module-level
    helpers defined elsewhere in this file.
    """
    x = x.flatten()
    prng = np.random.RandomState(seed)
    domain_size = np.prod(self.domain_shape)
    # Start with a uniform estimation of x
    x_hat = np.array([self.data_scale / float(domain_size)] * domain_size)
    W = get_matrix(W)
    # Downstream code here works on dense arrays; densify if sparse.
    if not isinstance(W, np.ndarray):
        W = W.toarray()
    measuredQueries = []  # indices of queries already selected
    nnls = inference.NonNegativeLeastSquares(method='new')
    M_history = np.empty((0, domain_size))  # stacked measurement rows
    y_history = []                          # flat list of noisy answers
    for i in range(1, self.rounds+1):
        eps_round = eps / float(self.rounds)
        # SW + SH2
        worst_approx = pselection.WorstApprox(W, measuredQueries, x_hat, eps_round * self.ratio)
        W_next = worst_approx.select(x, prng)
        measuredQueries.append(W_next.mwem_index)
        # Refine the selected query into i equi-width intervals.
        M = selection.AddEquiWidthIntervals(W_next, i).select()
        if not isinstance(M, np.ndarray):
            M = M.toarray()
        laplace = measurement.Laplace(M, eps_round * (1-self.ratio))
        y = laplace.measure(x, prng)
        # default use history
        M_history = np.vstack([M_history, M])
        y_history.extend(y)
        if self.total_noise_scale != 0:
            # Anchor the solution with a noiseless-ish total-count constraint.
            total_query = sparse.csr_matrix([1]*domain_size)
            noise_scale = laplace_scale_factor(M, eps_round * (1-self.ratio))
            x_hat = nnls.infer([total_query, M_history], [[self.data_scale], y_history], [self.total_noise_scale, noise_scale])
        else:
            # No anchor: infer from the current round's measurement only.
            x_hat = nnls.infer(M, y)
    return x_hat
def Run(self, W, x, eps, seed):
    """MWEM-style variant: select worst-approximated query via the
    EXPONENTIAL mechanism, measure it with Laplace noise, and update the
    estimate with Multiplicative Weights (optionally over all history)."""
    x = x.flatten()
    prng = np.random.RandomState(seed)
    domain_size = np.prod(self.domain_shape)
    # Start with a uniform estimation of x
    x_hat = np.array([self.data_scale / float(domain_size)] * domain_size)
    W = get_matrix(W)
    # Work on dense arrays below; densify if sparse.
    if not isinstance(W, np.ndarray):
        W = W.toarray()
    measuredQueries = []  # indices of queries already selected
    mult_weight = inference.MultiplicativeWeights(updateRounds = self.update_rounds)
    M_history = np.empty((0, domain_size))  # stacked measurement rows
    y_history = []                          # flat list of noisy answers
    for i in range(1, self.rounds+1):
        eps_round = eps / float(self.rounds)
        # SW: selection via the exponential mechanism, eps_round * ratio.
        worst_approx = pselection.WorstApprox(W, measuredQueries, x_hat, eps_round * self.ratio, 'EXPONENTIAL')
        M = worst_approx.select(x, prng)
        measuredQueries.append(M.mwem_index)
        if not isinstance(M, np.ndarray):
            M = M.toarray()
        # LM: Laplace measurement with the remaining per-round budget.
        laplace = measurement.Laplace(M, eps_round * (1-self.ratio))
        y = laplace.measure(x, prng)
        M_history = np.vstack([M_history, M])
        y_history.extend(y)
        # MW: multiplicative-weights update of the estimate.
        if self.use_history:
            x_hat = mult_weight.infer(M_history, y_history, x_hat)
        else:
            x_hat = mult_weight.infer(M, y, x_hat)
    return x_hat
def Run(self, W, x, eps, seed):
    """Sparse-accumulator variant: WorstApprox selection against the running
    union of selected queries (W_partial), Laplace measurement, and NNLS
    inference over the sparse history (with optional total-count anchor).

    NOTE(review): unlike the sibling Run methods, x is NOT flattened here —
    confirm callers always pass a 1-D x.
    """
    prng = np.random.RandomState(seed)
    domain_size = np.prod(self.domain_shape)
    # Start with a uniform estimation of x
    x_hat = np.array([self.data_scale / float(domain_size)] * domain_size)
    W = get_matrix(W)
    # All-zero sparse accumulator of every query selected so far.
    W_partial = sparse.csr_matrix(W.shape)
    nnls = inference.NonNegativeLeastSquares()
    # NOTE(review): M_history starts as a DENSE (0, n) array but is grown
    # with sparse.vstack below — presumably scipy coerces it; verify.
    M_history = np.empty((0, domain_size))
    y_history = []
    for i in range(1, self.rounds+1):
        eps_round = eps / float(self.rounds)
        # SW + SH2
        worst_approx = pselection.WorstApprox(sparse.csr_matrix(W), W_partial, x_hat, eps_round * self.ratio)
        W_next = worst_approx.select(x, prng)
        # Refine the selected query into i equi-width intervals.
        M = selection.AddEquiWidthIntervals(W_next, i).select()
        W_partial += W_next
        laplace = measurement.Laplace(M, eps_round * (1-self.ratio))
        y = laplace.measure(x, prng)
        # default use history
        M_history = sparse.vstack([M_history, M])
        y_history.extend(y)
        if self.total_noise_scale != 0:
            # Anchor inference with a total-count constraint.
            total_query = sparse.csr_matrix([1]*domain_size)
            noise_scale = laplace_scale_factor(M, eps_round * (1-self.ratio))
            x_hat = nnls.infer([total_query, M_history], [[self.data_scale], y_history], [self.total_noise_scale, noise_scale])
        else:
            x_hat = nnls.infer(M, y)
    return x_hat
def Run(self, W, x, eps, seed):
    """Sparse-accumulator MWEM variant: EXPONENTIAL-mechanism selection
    against the running union W_partial, Laplace measurement, and
    Multiplicative Weights update (optionally over all history)."""
    x = x.flatten()
    prng = np.random.RandomState(seed)
    domain_size = np.prod(self.domain_shape)
    # Start with a uniform estimation of x
    x_hat = np.array([self.data_scale / float(domain_size)] * domain_size)
    W = get_matrix(W)
    # All-zero sparse accumulator of every query selected so far.
    W_partial = sparse.csr_matrix(W.shape)
    mult_weight = inference.MultiplicativeWeights(updateRounds = self.update_rounds)
    # NOTE(review): dense (0, n) seed grown via sparse.vstack — verify coercion.
    M_history = np.empty((0, domain_size))
    y_history = []
    for i in range(1, self.rounds+1):
        eps_round = eps / float(self.rounds)
        # SW: exponential-mechanism selection with eps_round * ratio.
        worst_approx = pselection.WorstApprox(sparse.csr_matrix(W), W_partial, x_hat, eps_round * self.ratio, 'EXPONENTIAL')
        W_next = worst_approx.select(x, prng)
        M = support.extract_M(W_next)
        W_partial += W_next
        # LM: Laplace measurement with the remaining per-round budget.
        laplace = measurement.Laplace(M, eps_round * (1-self.ratio))
        y = laplace.measure(x, prng)
        M_history = sparse.vstack([M_history, M])
        y_history.extend(y)
        # MW: multiplicative-weights update of the estimate.
        if self.use_history:
            x_hat = mult_weight.infer(M_history, y_history, x_hat)
        else:
            x_hat = mult_weight.infer(M, y, x_hat)
    return x_hat
def Run(self, W, x, eps, seed):
    """List-history MWEM variant: WorstApprox selection (by measured-query
    index list), equi-width refinement, Laplace measurement, and
    Multiplicative Weights update (optionally over all history)."""
    x = x.flatten()
    prng = np.random.RandomState(seed)
    domain_size = np.prod(self.domain_shape)
    # Start with a uniform estimation of x
    x_hat = np.array([self.data_scale / float(domain_size)] * domain_size)
    measuredQueries = []  # indices of queries already selected
    mult_weight = inference.MultiplicativeWeights(updateRounds = self.update_rounds)
    M_history = []  # measurement matrices, one per round
    y_history = []  # noisy answers, one per round
    for i in range(1, self.rounds+1):
        eps_round = eps / float(self.rounds)
        # SW + SH2
        worst_approx = pselection.WorstApprox(W, measuredQueries, x_hat, eps_round * self.ratio)
        W_next = worst_approx.select(x, prng)
        measuredQueries.append(W_next.mwem_index)
        # Refine the selected query into i equi-width intervals.
        M = selection.AddEquiWidthIntervals(W_next, i).select()
        # LM: Laplace measurement with the remaining per-round budget.
        laplace = measurement.Laplace(M, eps_round * (1-self.ratio))
        y = laplace.measure(x, prng)
        M_history.append(M)
        y_history.append(y)
        # MW: multiplicative-weights update of the estimate.
        if self.use_history:
            x_hat = mult_weight.infer(M_history, y_history, x_hat)
        else:
            x_hat = mult_weight.infer(M, y, x_hat)
    return x_hat
def Run(self, W, x, eps, seed):
    """Per round: WorstApprox selection against the running union W_partial,
    Laplace measurement of the selected query, NNLS inference.

    Fixes vs. original:
    - The Laplace step consumed ``eps * (1 - ratio)`` EVERY round, so the
      total budget spent was rounds * eps * (1 - ratio) + eps * ratio > eps.
      All sibling Run implementations in this file use the per-round budget
      ``eps_round * (1 - ratio)``; this now matches them.
    - Removed the unused local ``measured_queries``.
    """
    prng = np.random.RandomState(seed)
    # Random non-negative starting estimate with the same shape as x.
    x_hat = prng.rand(*x.shape)
    # All-zero sparse accumulator of every query selected so far.
    W_partial = sparse.csr_matrix(W.get_matrix().shape)
    nnls = inference.NonNegativeLeastSquares()
    for i in range(1, self.rounds + 1):
        # Split the total budget evenly across rounds.
        eps_round = eps / float(self.rounds)
        # Selection spends eps_round * ratio.
        worst_approx = pselection.WorstApprox(
            sparse.csr_matrix(W.get_matrix()),
            W_partial,
            x_hat,
            eps_round * self.ratio)
        W_next = worst_approx.select(x, prng)
        M = support.extract_M(W_next)
        W_partial += W_next
        # Measurement spends the remaining eps_round * (1 - ratio)
        # (was: eps * (1 - ratio) — see docstring).
        laplace = measurement.Laplace(M, eps_round * (1 - self.ratio))
        y = laplace.measure(x, prng)
        x_hat = nnls.infer(M, y)
    return x_hat
def Run(self, W, x, eps, seed):
    """Per round: WorstApprox selection against the running union W_partial,
    equi-width refinement, Laplace measurement, Multiplicative Weights update.

    Fixes vs. original:
    - The Laplace step consumed ``eps * (1 - ratio)`` EVERY round, so the
      total budget spent was rounds * eps * (1 - ratio) + eps * ratio > eps.
      All sibling Run implementations in this file use the per-round budget
      ``eps_round * (1 - ratio)``; this now matches them.
    - Removed the unused local ``measured_queries``.
    """
    prng = np.random.RandomState(seed)
    # Random non-negative starting estimate with the same shape as x.
    x_hat = prng.rand(*x.shape)
    # All-zero sparse accumulator of every query selected so far.
    W_partial = sparse.csr_matrix(W.get_matrix().shape)
    mult_weight = inference.MultiplicativeWeights()
    for i in range(1, self.rounds + 1):
        # Split the total budget evenly across rounds.
        eps_round = eps / float(self.rounds)
        # SW + SH2: selection spends eps_round * ratio.
        worst_approx = pselection.WorstApprox(
            sparse.csr_matrix(W.get_matrix()),
            W_partial,
            x_hat,
            eps_round * self.ratio)
        W_next = worst_approx.select(x, prng)
        # Refine the selected query into i equi-width intervals.
        M = selection.AddEquiWidthIntervals(W_next, i).select()
        W_partial += W_next
        # Measurement spends the remaining eps_round * (1 - ratio)
        # (was: eps * (1 - ratio) — see docstring).
        laplace = measurement.Laplace(M, eps_round * (1 - self.ratio))
        y = laplace.measure(x, prng)
        x_hat = mult_weight.infer(M, y, x_hat)
    return x_hat