Example #1
    def Run(self, W, x, eps, seed):
        x = x.flatten()  # work on the flattened data vector
        prng = np.random.RandomState(seed)

        striped_vectors = mapper.Striped(self.domain, self.stripe_dim).partitions()
        hd_vector = support.combine_all(striped_vectors)
        striped_mapping = hd_vector.flatten()

        x_sub_list = meta.SplitByPartition(striped_mapping).transform(x)

        Ms = []
        ys = []
        scale_factors = []
        group_idx = sorted(set(striped_mapping))

        # Given a group id on the full flattened vector, recover the corresponding
        # group id within each striped partition (computed in a loop to save memory).
        self.subgroups = {}
        for i in group_idx:
            # np.where returns one index array per dimension of hd_vector
            selected_idx = np.where(hd_vector == i)
            ans = [p[idx[0]] for p, idx in zip(striped_vectors, selected_idx)]
            self.subgroups[i] = ans

        for i in group_idx: 
            x_i = x_sub_list[group_idx.index(i)]
            
            # override the standard workload projection for efficiency
            W_i = self.project_workload(W, striped_vectors, hd_vector, i)

            dawa = pmapper.Dawa(eps, self.ratio, self.approx)
            mapping = dawa.mapping(x_i, prng)
            reducer = transformation.ReduceByPartition(mapping)
            x_bar = reducer.transform(x_i)
            W_bar = W_i * support.expansion_matrix(mapping)

            M_bar = selection.GreedyH(x_bar.shape, W_bar).select()
            if not isinstance(M_bar, np.ndarray):
                M_bar = M_bar.toarray()

            y_i = measurement.Laplace(
                M_bar, eps * (1 - self.ratio)).measure(x_bar, prng)

            noise_scale_factor = laplace_scale_factor(
                M_bar, eps * (1 - self.ratio))

            # convert the measurement back to the original domain for inference
            P_i = support.projection_matrix(striped_mapping, i)
            M_i = (M_bar * support.reduction_matrix(mapping)) * P_i

            Ms.append(M_i)
            ys.append(y_i)
            scale_factors.append(noise_scale_factor)

        x_hat = inference.LeastSquares().infer(Ms, ys, scale_factors)

        return x_hat
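
A note on the final inference step: the sketch below illustrates, with plain
numpy, the kind of noise-scale-weighted least squares problem that
inference.LeastSquares is assumed to solve here (measurements taken with a
larger Laplace scale receive less weight). The helper name and the exact
weighting are illustrative assumptions, not the library's implementation.

import numpy as np

def weighted_least_squares_sketch(Ms, ys, scale_factors):
    # assumption: divide each measurement block by its noise scale so that
    # noisier answers contribute less to the fitted estimate
    A = np.vstack([M / s for M, s in zip(Ms, scale_factors)])
    b = np.concatenate([y / s for y, s in zip(ys, scale_factors)])
    x_hat, *_ = np.linalg.lstsq(A, b, rcond=None)
    return x_hat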
Example #2
    def mapping(self):

        assert self.proj_dim < len(self.domain_shape), "proj_dim out of range"

        # constant label on every dimension except the projected one
        margin_vec = [np.ones(n, dtype=int) for n in self.domain_shape]

        margin_vec[self.proj_dim] = np.arange(
            self.domain_shape[self.proj_dim])  # the projecting dimension keeps distinct labels

        return support.combine_all(margin_vec).flatten()
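
The mapping above groups together all cells of the domain that share the same
index along proj_dim. The standalone snippet below (plain numpy, not the ektelo
API) shows the intended grouping for a small 2 x 3 domain with proj_dim = 1;
the exact ids produced by support.combine_all may differ, but the partition
structure is the same.

import numpy as np

domain_shape = (2, 3)
proj_dim = 1

# broadcast the per-index labels of the projected dimension over the full grid
# (this broadcasting shortcut works here because proj_dim is the last axis)
labels = np.arange(domain_shape[proj_dim])
grid = np.broadcast_to(labels, domain_shape)  # [[0 1 2], [0 1 2]]
print(grid.flatten())                         # [0 1 2 0 1 2]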
Example #3
    def Run(self, W, x, eps):

        striped_vectors = striped_partition(self.domain_shape, self.stripe_dim)
        hd_vector = support.combine_all(striped_vectors)
        striped_mapping = hd_vector.flatten()

        x_sub_list = x.split_by_partition(striped_mapping)

        Ms = []
        ys = []
        scale_factors = []
        group_idx = sorted(set(striped_mapping))

        # Given a group id on the full flattened vector, recover the corresponding
        # group id within each striped partition (computed in a loop to save memory).
        self.subgroups = {}
        for i in group_idx:
            # np.where returns one index array per dimension of hd_vector
            selected_idx = np.where(hd_vector == i)
            ans = [p[idx[0]] for p, idx in zip(striped_vectors, selected_idx)]
            self.subgroups[i] = ans

        for i in group_idx:
            x_i = x_sub_list[group_idx.index(i)]

            # override the standard workload projection for efficiency
            W_i = self.project_workload(W, striped_vectors, hd_vector, i)

            mapping = x_i.dawa(self.ratio, self.approx, eps)
            x_bar = x_i.reduce_by_partition(mapping)

            W_bar = W_i * support.expansion_matrix(mapping)
            M_bar = greedyH((len(set(mapping)), ), W_bar)
            y_i = x_bar.laplace(M_bar, eps * (1 - self.ratio))

            noise_scale_factor = laplace_scale_factor(M_bar,
                                                      eps * (1 - self.ratio))

            # convert the measurement back to the original domain for inference
            P_i = support.projection_matrix(striped_mapping, i)
            M_i = (M_bar * support.reduction_matrix(mapping)) * P_i

            Ms.append(M_i)
            ys.append(y_i)
            scale_factors.append(noise_scale_factor)

        x_hat = least_squares(Ms, ys, scale_factors)

        return x_hat
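
For reference, the noise scale appended to scale_factors is assumed to be the
standard Laplace-mechanism scale for a linear measurement matrix: its L1
sensitivity (maximum absolute column sum) divided by the privacy budget spent
on it. The helper below is a hedged sketch of that assumption, not the
library's actual laplace_scale_factor.

import numpy as np

def laplace_scale_factor_sketch(M, eps):
    # L1 sensitivity of a linear measurement: max absolute column sum
    sensitivity = np.abs(np.asarray(M)).sum(axis=0).max()
    return sensitivity / eps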
Example #4
    def mapping(self):
        # distinct label per index on every dimension except the stripe
        # dimension, which gets a single constant label
        vectors = [np.arange(dom, dtype=int) for dom in self.domain_size]
        vectors[self.stripe_dim] = np.ones(self.domain_size[self.stripe_dim],
                                           dtype=int)

        return support.combine_all(vectors).flatten()
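
Whereas Example #2 keeps only the projected dimension distinct, this mapping
collapses only the stripe dimension: cells that differ solely along stripe_dim
fall into the same group, so each stripe along that dimension is treated as a
single partition. A standalone illustration (plain numpy, not the ektelo API)
for a 2 x 3 domain with stripe_dim = 1:

import numpy as np

domain_size = (2, 3)
stripe_dim = 1

# one label per index of the non-stripe dimension, repeated along the stripe
row_labels = np.arange(domain_size[0])
grid = np.broadcast_to(row_labels[:, None], domain_size)  # [[0 0 0], [1 1 1]]
print(grid.flatten())                                      # [0 0 0 1 1 1]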