Example #1
    def Run(self, W, x, eps, seed):
        x = x.flatten()
        prng = np.random.RandomState(seed)

        striped_mapping = mapper.Striped(self.domain,
                                         self.stripe_dim).mapping()
        x_sub_list = meta.SplitByPartition(striped_mapping).transform(x)

        Ms = []
        ys = []
        scale_factors = []
        group_idx = sorted(set(striped_mapping))
        for i in group_idx:

            x_i = x_sub_list[group_idx.index(i)]
            P_i = support.projection_matrix(striped_mapping, i)

            M_bar = selection.HB(x_i.shape).select()
            y_i = measurement.Laplace(M_bar, eps).measure(x_i, prng)

            noise_scale_factor = laplace_scale_factor(M_bar, eps)

            M_i = M_bar * P_i

            Ms.append(M_i)
            ys.append(y_i)
            scale_factors.append(noise_scale_factor)

        x_hat = inference.LeastSquares().infer(Ms, ys, scale_factors)

        return x_hat
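
The plan above splits a flattened data vector by a striped partition, runs an HB strategy with Laplace measurements on each stripe, and lifts every measurement back to the full domain before one joint least-squares inference. Below is a minimal plain-numpy sketch of that split-measure-project pattern; it deliberately avoids the ektelo API, and the identity strategy and noiseless measurement are stand-ins for HB and Laplace:

    import numpy as np

    x = np.arange(6.0)                      # toy flattened data vector
    mapping = np.array([0, 1, 0, 1, 0, 1])  # group label per cell (a "striped" mapping)

    Ms, ys = [], []
    for g in sorted(set(mapping)):
        idx = np.where(mapping == g)[0]
        P_g = np.zeros((idx.size, x.size))  # projection onto group g
        P_g[np.arange(idx.size), idx] = 1.0
        x_g = P_g @ x                       # same as x[idx]
        M_g = np.eye(idx.size)              # stand-in for the selected strategy
        ys.append(M_g @ x_g)                # noiseless stand-in for the Laplace measurement
        Ms.append(M_g @ P_g)                # measurement expressed over the full domain

Stacking Ms and ys gives a single least-squares system over the full x, which is what infer(Ms, ys, scale_factors) solves above.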
Example #2
    def Run(self, W, x, eps, seed):
        assert len(x.shape) == 2, "Adaptive Grid only works for 2D domain"
        shape_2d = x.shape
        x = x.flatten()
        prng = np.random.RandomState(seed)
        Ms = []
        ys = []

        M = selection.UniformGrid(shape_2d, 
                                  self.data_scale, 
                                  eps, 
                                  ag_flag=True, 
                                  c=self.c).select()


        y = measurement.Laplace(M, self.alpha*eps).measure(x, prng)
        x_hat = inference.LeastSquares().infer(M, y)

        Ms.append(M)
        ys.append(y)

        # Prepare partition object for later SplitByPartition.
        # This Partition selection operator is missing from Figure 2, plan 12 in the paper.
        uniform_mapping = mapper.UGridPartition(shape_2d, 
                                                self.data_scale, 
                                                eps, 
                                                ag_flag=True, 
                                                c=self.c).mapping()
        x_sub_list = meta.SplitByPartition(uniform_mapping).transform(x)
        sub_domains = support.get_subdomain_grid(uniform_mapping, shape_2d)

        ll, hi = [], []

        for i in sorted(set(uniform_mapping)):

            x_i = x_sub_list[i]
            P_i = support.projection_matrix(uniform_mapping, i)
            x_hat_i = P_i * x_hat

            sub_domain_shape = sub_domains[i]
            M_i = selection.AdaptiveGrid(sub_domain_shape, 
                                         x_hat_i, 
                                         (1-self.alpha)*eps, 
                                         c2=self.c2).select()


            y_i = measurement.Laplace(M_i, (1-self.alpha)*eps).measure(x_i, prng)

            offset = np.unravel_index(P_i.matrix.nonzero()[1][0], shape_2d)
            ll.extend(M_i._lower + np.array(offset))
            hi.extend(M_i._higher + np.array(offset))

            ys.append(y_i)

        Ms.append(workload.RangeQueries(shape_2d, np.array(ll), np.array(hi)))
        x_hat = inference.LeastSquares().infer(Ms, ys)

        return x_hat
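
The corners that AdaptiveGrid reports (M_i._lower, M_i._higher) are local to each subgrid, so the loop recovers each subgrid's global position from the first column its projection matrix touches and shifts the corners by that offset. A self-contained sketch of the coordinate translation; the flat index 9 is an arbitrary example value:

    import numpy as np

    shape_2d = (4, 4)
    first_flat_cell = 9                                   # first cell the projection matrix covers
    offset = np.unravel_index(first_flat_cell, shape_2d)  # -> (2, 1)

    local_lower = np.array([0, 0])                 # subgrid-local lower corner
    global_lower = local_lower + np.array(offset)  # -> array([2, 1]) in the full grid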
Example #3
    def Run(self, W, x, eps, seed):
        assert len(x.shape) == 2, "Adaptive Grid only works for 2D domain"

        shape_2d = x.shape
        x = x.flatten()
        prng = np.random.RandomState(seed)
        Ms = []
        ys = []

        M = selection.UniformGrid(shape_2d,
                                  self.data_scale,
                                  eps,
                                  ag_flag=True,
                                  c=self.c).select()
        if not isinstance(M, np.ndarray):
            M = M.toarray()

        y = measurement.Laplace(M, self.alpha*eps).measure(x, prng)
        x_hat = inference.LeastSquares().infer(M, y)

        Ms.append(M)
        ys.append(y)

        # Prepare partition object for later SplitByPartition.
        # This Partition selection operator is missing from Figure 2, plan 12 in the paper.
        uniform_mapping = mapper.UGridPartition(shape_2d,
                                                self.data_scale,
                                                eps,
                                                ag_flag=True,
                                                c=self.c).mapping()
        x_sub_list = meta.SplitByPartition(uniform_mapping).transform(x)
        sub_domains = support.get_subdomain_grid(uniform_mapping, shape_2d)

        for i in sorted(set(uniform_mapping)):
            x_i = x_sub_list[i]

            P_i = support.projection_matrix(uniform_mapping, i)
            x_hat_i = P_i * x_hat

            sub_domain_shape = sub_domains[i]

            M_i = selection.AdaptiveGrid(sub_domain_shape,
                                         x_hat_i,
                                         (1-self.alpha)*eps,
                                         c2=self.c2).select()
            if not isinstance(M_i, np.ndarray):
                M_i = M_i.toarray()

            y_i = measurement.Laplace(M_i, (1-self.alpha)*eps).measure(x_i, prng)

            M_i_o = M_i * P_i

            Ms.append(M_i_o)
            ys.append(y_i)

        x_hat = inference.LeastSquares().infer(Ms, ys, [1.0]*len(ys))

        return x_hat
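
Unlike Example #2, this variant densifies the selection matrices before measuring and passes explicit unit scale factors to LeastSquares. A short sketch of the isinstance/toarray guard, with scipy.sparse standing in for whatever sparse type select() may return:

    import numpy as np
    from scipy import sparse

    M = sparse.identity(4, format='csr')  # pretend select() returned a sparse matrix
    if not isinstance(M, np.ndarray):
        M = M.toarray()                   # now a dense (4, 4) ndarray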
Example #4
    def Run(self, W, x, eps, seed):
        x = x.flatten()            
        prng = np.random.RandomState(seed)

        striped_vectors = mapper.Striped(self.domain, self.stripe_dim).partitions()
        hd_vector = support.combine_all(striped_vectors)
        striped_mapping = hd_vector.flatten()

        x_sub_list = meta.SplitByPartition(striped_mapping).transform(x)

        Ms = []
        ys = []
        scale_factors = []
        group_idx = sorted(set(striped_mapping))

        # Given a group id on the full vector, recover the group id for each partition
        # put back in loop to save memory
        self.subgroups = {}
        for i in group_idx:
            selected_idx = np.where(hd_vector == i)
            ans = [p[idx[0]] for p, idx in zip(striped_vectors, selected_idx)]
            self.subgroups[i] = ans

        for i in group_idx: 
            x_i = x_sub_list[group_idx.index(i)]
            
            # overwriting standard projection for efficiency
            W_i = self.project_workload(W, striped_vectors, hd_vector, i)

            dawa = pmapper.Dawa(eps, self.ratio, self.approx)
            mapping = dawa.mapping(x_i, prng)
            reducer = transformation.ReduceByPartition(mapping)
            x_bar = reducer.transform(x_i)
            W_bar = W_i * support.expansion_matrix(mapping)

            M_bar = selection.GreedyH(x_bar.shape, W_bar).select()
            if not isinstance(M_bar, np.ndarray):
                M_bar = M_bar.toarray()

            y_i = measurement.Laplace(
                M_bar, eps * (1 - self.ratio)).measure(x_bar, prng)

            noise_scale_factor = laplace_scale_factor(
                M_bar, eps * (1 - self.ratio))

            # convert the measurement back to the original domain for inference
            P_i = support.projection_matrix(striped_mapping, i)
            M_i = (M_bar * support.reduction_matrix(mapping)) * P_i

            Ms.append(M_i)
            ys.append(y_i)
            scale_factors.append(noise_scale_factor)

        x_hat = inference.LeastSquares().infer(Ms, ys, scale_factors)

        return x_hat
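
Here the striped mapping is assembled from one partition vector per dimension: support.combine_all gives each cell of the high-dimensional domain a group id determined by its per-dimension stripe labels, and self.subgroups inverts that id back into per-dimension labels. A plain-numpy sketch of one such encoding for two dimensions (combine_all's actual id scheme is an internal detail; the mixed-radix code below is only illustrative):

    import numpy as np

    v0 = np.array([0, 0, 1])  # stripe labels along dimension 0
    v1 = np.array([0, 1])     # stripe labels along dimension 1
    n1 = v1.max() + 1
    hd_vector = v0[:, None] * n1 + v1[None, :]  # unique id per (label0, label1) pair
    mapping = hd_vector.flatten()

    # np.where(hd_vector == i) then recovers which stripe in each dimension
    # contributed to full-domain group i, as self.subgroups does above.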
Example #5
    def split_by_partition(self, mapping):
        private_node_ids = self.kernel_service.partition(
            self.private_node_id, mapping)

        data_nodes = self.data_manager.partition(
            meta.SplitByPartition(mapping), self.prng, self.data_node)

        return [
            self._clone(private_node_id, data_node)
            for private_node_id, data_node in zip(private_node_ids, data_nodes)
        ]
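
This helper mirrors SplitByPartition in a client/server setting: the kernel service partitions the remote private node, the data manager partitions the local data the same way, and the plan clones itself once per aligned (private node, data node) pair so each clone works on a single partition.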
Example #6
    def Run(self, W, x, eps, seed):
        x = x.flatten()            
        prng = np.random.RandomState(seed)

        striped_mapping = mapper.Striped(self.domain, self.stripe_dim).mapping()
        x_sub_list = meta.SplitByPartition(striped_mapping).transform(x)

        Ms = []
        ys = []
        scale_factors = []
        group_idx = sorted(set(striped_mapping))

        W = get_matrix(W)

        for i in group_idx: 
            x_i = x_sub_list[group_idx.index(i)]
            P_i = support.projection_matrix(striped_mapping, i)
            W_i = W * P_i.T

            dawa = pmapper.Dawa(eps, self.ratio, self.approx)
            mapping = dawa.mapping(x_i, prng)
            reducer = transformation.ReduceByPartition(mapping)
            x_bar = reducer.transform(x_i)
            W_bar = W_i * support.expansion_matrix(mapping)

            M_bar = selection.GreedyH(x_bar.shape, W_bar).select()

            if not isinstance(M_bar, np.ndarray):
                M_bar = M_bar.toarray()

            y_i = measurement.Laplace(
                M_bar, eps * (1 - self.ratio)).measure(x_bar, prng)

            noise_scale_factor = laplace_scale_factor(
                M_bar, eps * (1 - self.ratio))

            M_i = (M_bar * support.reduction_matrix(mapping)) * P_i

            Ms.append(M_i)
            ys.append(y_i)
            scale_factors.append(noise_scale_factor)

        x_hat = inference.LeastSquares().infer(Ms, ys, scale_factors)

        return x_hat
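
The difference from Example #4 is how the workload is restricted: W_i = W * P_i.T keeps only the columns of W that fall inside stripe i, so GreedyH optimizes each subdomain against the queries it can actually answer there. A toy numpy version of that column restriction:

    import numpy as np

    W = np.ones((2, 6))                     # toy workload over 6 cells
    mapping = np.array([0, 1, 0, 1, 0, 1])
    idx = np.where(mapping == 0)[0]
    P_0 = np.zeros((idx.size, W.shape[1]))  # projection onto stripe 0
    P_0[np.arange(idx.size), idx] = 1.0
    W_0 = W @ P_0.T                         # shape (2, 3): columns for stripe 0 only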
Example #7
    def partition(self, node_id, mapping):
        node = self.private_manager.graph().id_node_map[node_id]
        part = meta.SplitByPartition(mapping)
        private_nodes = self.private_manager.partition(part, after=node)

        return [private_node.id for private_node in private_nodes]
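
This is the server-side counterpart of Example #5: it inserts a SplitByPartition node after the given node in the private computation graph and returns the ids of the resulting children, which the caller pairs with its local data partitions.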