Code example #1
import torch
import torch.nn as nn
from torch.distributions import LogNormal


class Model(nn.Module):
    def __init__(self):
        super(Model, self).__init__()
        # Three-layer MLP for flattened 28x28 inputs (e.g. MNIST)
        self.fc1 = nn.Linear(28 * 28, 400)
        # nn.init.uniform_(self.fc1.bias, -0.7, -0.7)
        # nn.init.normal_(self.fc1.bias, -100, 99.3)
        self.fc2 = nn.Linear(400, 400)
        # nn.init.uniform_(self.fc2.bias, -0.7, -0.7)
        # nn.init.normal_(self.fc2.bias, -100, 99.3)
        self.fc3 = nn.Linear(400, 10)
        self.activate = nn.ReLU()
        self.name = 'mlp'
        # Learnable per-unit masks, initialised from a shifted log-normal
        ln = LogNormal(0., 1.)
        self.mask1 = nn.Parameter(ln.sample(torch.Size([400])) - 1.5)
        # nn.init.uniform_(self.mask1, -1, 0)
        self.mask2 = nn.Parameter(ln.sample(torch.Size([400])) - 1.5)
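
A minimal usage sketch. The snippet does not show a forward pass, so the flattened batch and the manual first-layer call below are assumptions for illustration only:

model = Model()
x = torch.randn(32, 28 * 28)          # assumed flattened MNIST-style batch
h = model.activate(model.fc1(x))      # hypothetical first-layer application
print(model.mask1.shape, h.shape)     # torch.Size([400]) torch.Size([32, 400])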
Code example #2
import torch
from torch.distributions import Normal, Independent


def get_x(theta, design):
    # Model response at each design point, broadcasting over any leading
    # batch dimensions of theta. (The signature and the first two parameter
    # slices are restored here; the source snippet begins mid-function.)
    theta1 = theta[..., 0:1]
    theta2 = theta[..., 1:2]
    theta3 = theta[..., 2:3]
    while design.dim() < theta1.dim():
        design = design.unsqueeze(0)
    x = 400. * theta2 * \
        (torch.exp(-theta1*design) - torch.exp(-theta2*design)) \
        / (theta3*(theta2-theta1))
    return x


# `design_adv` and `design_uni` are design-point tensors defined earlier
# in the source (not shown). Independent Gaussian observation noise:
loc = torch.zeros(design_adv.shape)
scale = 0.1 * torch.ones(design_adv.shape)
noise = Normal(loc=loc, scale=scale)
noise = Independent(noise, 1)

# Generate pseudo-data from a single prior draw (`prior` is defined
# earlier in the source, not shown).
torch.manual_seed(0)
theta0 = prior.sample()
noise0 = noise.sample()
x0_adv = get_x(theta0, design_adv)
x0_uni = get_x(theta0, design_uni)
y0_adv = x0_adv + noise0
y0_uni = x0_uni + noise0
print("True parameters", theta0)

# Gaussian importance-sampling proposal centred on the true parameters
loc = theta0
scale = torch.tensor((0.2, 0.2, 0.2))
proposal = Normal(loc, scale)
proposal = Independent(proposal, 1)


def is_weights(theta, design, y0):
    # Unnormalised log importance weights given pseudo-observations y0.
    x = get_x(theta, design)
    # The source snippet is truncated here; the continuation follows code
    # example #3 below, and the final return is an assumption.
    diff = x - y0.unsqueeze(0)
    log_w = noise.log_prob(diff) - prior.log_prob(theta)
    return log_w
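
A hedged usage sketch of the importance-sampling step; the sample size, the draw from `proposal`, and the softmax normalisation are assumptions, not part of the source:

theta_is = proposal.sample((5000,))
log_w = is_weights(theta_is, design_adv, y0_adv)
w = torch.softmax(log_w, dim=0)                      # self-normalised weights
post_mean = (w.unsqueeze(-1) * theta_is).sum(dim=0)  # posterior mean estimate
print("Posterior mean under the adversarial design", post_mean)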
Code example #3
import torch
from torch.distributions import Normal, Independent


def get_x(theta, design):
    # (signature and parameter slices restored; the snippet begins mid-function)
    theta1 = theta[..., 0:1]
    theta2 = theta[..., 1:2]
    theta3 = theta[..., 2:3]
    x = 400. * theta2 * \
        (torch.exp(-theta1*design) - torch.exp(-theta2*design)) \
        / (theta3*(theta2-theta1))
    return x

# `design_adv` and `design_fig` are design-point tensors defined earlier
# in the source (not shown). Independent Gaussian observation noise:
loc = torch.zeros(design_adv.shape)
scale = 0.1 * torch.ones(design_adv.shape)
noise = Normal(loc=loc, scale=scale)
noise = Independent(noise, 1)

#######################
## GENERATE PSEUDO-DATA
#######################

torch.manual_seed(0)
theta0 = prior.sample()  # `prior` is defined earlier in the source (not shown)
noise0 = noise.sample()
x0_adv = get_x(theta0, design_adv)
x0_fig = get_x(theta0, design_fig)
y0_adv = x0_adv + noise0
y0_fig = x0_fig + noise0
print("True parameters", theta0)

######################
## IMPORTANCE SAMPLING
######################

def is_weights(theta, design, y0):
    # Unnormalised log importance weights given pseudo-observations y0.
    x = get_x(theta, design)
    diff = x - y0.unsqueeze(0)
    log_w = noise.log_prob(diff) - prior.log_prob(theta)
    # The source snippet is truncated here; returning the log-weights is
    # an assumption.
    return log_w
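
A hedged diagnostic sketch; the sample size and the effective-sample-size computation are assumptions, not part of the source:

theta_is = prior.sample((5000,))
log_w = is_weights(theta_is, design_adv, y0_adv)
w = torch.softmax(log_w, dim=0)   # self-normalised weights
ess = 1.0 / (w ** 2).sum()        # effective sample size diagnostic
print("ESS", ess.item())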
Code example #4
import numpy as np
import torch
from torch.distributions import Independent, LogNormal, Normal

torch.manual_seed(0)


def get_x(theta, design):
    # Model response for a batch of parameter vectors at each design point
    theta1 = theta[:, 0:1]
    theta2 = theta[:, 1:2]
    theta3 = theta[:, 2:3]
    x = 400. * theta2 * (torch.exp(-theta1*design) - torch.exp(-theta2*design)) / (theta3*(theta2-theta1))
    return x

# Nested Monte Carlo sample sizes
n_inner = 1000
n_outer = 100

# Log-normal prior over the three model parameters
loc = torch.tensor(np.log((0.1, 1., 20.)), dtype=torch.float64)
scale = torch.tensor(np.sqrt((0.05, 0.05, 0.05)), dtype=torch.float64)
prior = LogNormal(loc, scale)
prior = Independent(prior, 1)
theta_inner = prior.sample((n_inner,))
theta_outer = prior.sample((n_outer,))

# Independent Gaussian observation noise at 15 design points
loc = torch.zeros(15, dtype=torch.float64)
scale = 0.1 * torch.ones(15, dtype=torch.float64)
noise = Normal(loc, scale)
noise = Independent(noise, 1)
noise_entropy = noise.entropy()
noise_outer = noise.sample((n_outer,))

def objective(design):
    x_outer = get_x(theta_outer, design)
    x_inner = get_x(theta_inner, design)
    y_outer = x_outer + noise_outer
    # Get matrix of all y_outer - x_inner values
    diff = y_outer.unsqueeze(1) - x_inner.unsqueeze(0)
    log_prob_diff = noise.log_prob(diff)
    # The source snippet is truncated here; the completion below is an
    # assumption: the standard nested Monte Carlo EIG estimator, with the
    # analytic noise entropy standing in for the conditional likelihood term.
    log_marginal = torch.logsumexp(log_prob_diff, dim=1) - np.log(n_inner)
    return -noise_entropy - log_marginal.mean()
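
A hedged usage sketch; the equally spaced candidate design below is an assumption for illustration only:

design = torch.linspace(0.5, 24., 15, dtype=torch.float64)
print("Estimated EIG", objective(design).item())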
Code example #5
File: prepare_data.py  Project: AntixK/FedDyn
    # Relies on os, shutil, pathlib.Path, torch, tqdm and torch.distributions'
    # Dirichlet, Categorical and LogNormal, imported elsewhere in the file.
    def make(self,
             mode: int,
             num_clients: int,
             show_plots: bool = False,
             **kwargs) -> None:

        if os.path.exists(self.root_dir / "client_data"):
            shutil.rmtree(self.root_dir / "client_data")
        client_data_path = Path(self.root_dir / "client_data")
        client_data_path.mkdir()

        if not isinstance(self.test_data.targets, torch.Tensor):
            self.test_data.targets = torch.tensor(self.test_data.targets)
        test_data = [self.test_data[j] for j in range(len(self.test_data))]
        torch.save(test_data, client_data_path / "test_data.pth")

        if mode == 0:  # IID
            # Shuffle data
            data_ids = torch.randperm(self.num_train_data, dtype=torch.int32)
            num_data_per_client = self.num_train_data // num_clients

            if not isinstance(self.train_data.targets, torch.Tensor):
                self.train_data.targets = torch.tensor(self.train_data.targets)

            pbar = tqdm(range(num_clients), desc=f"{self.dataset_name} IID: ")
            for i in pbar:
                client_path = Path(client_data_path / str(i))
                client_path.mkdir()

                # TODO: Make this parallel for large number of clients & large datasets (Maybe not required)
                train_data = [
                    self.train_data[j]
                    for j in data_ids[i * num_data_per_client:(i + 1) *
                                      num_data_per_client]
                ]

                pbar.set_postfix({'# data / Client': num_data_per_client})

                if show_plots:
                    self._plot(train_data,
                               title=f"Client {i+1} Data Distribution")

                # Split data equally and send to the client
                torch.save(train_data, client_data_path / str(i) / "data.pth")
        elif mode == 1:  # Non-IID Balanced
            num_data_per_client = self.num_train_data // num_clients
            class_sampler = Dirichlet(
                torch.empty(self.num_classes).fill_(kwargs.get('dir_alpha')))
            if not isinstance(self.train_data.targets, torch.Tensor):
                self.train_data.targets = torch.tensor(self.train_data.targets)

            assigned_ids = []
            pbar = tqdm(range(num_clients),
                        desc=f"{self.dataset_name} Non-IID Balanced: ")
            for i in pbar:

                client_path = Path(client_data_path / str(i))
                client_path.mkdir()
                # Compute class prior probabilities for each client
                # Share of the jth class for the ith client (always sums to 1)
                p_ij = class_sampler.sample()
                weights = torch.zeros(self.num_train_data)
                for c_id in range(self.num_classes):
                    weights[self.train_data.targets == c_id] = p_ij[c_id]
                # Zero out previously assigned points so they are not sampled again
                weights[assigned_ids] = 0.0

                # Sample each data point uniformly without replacement based on
                # the sampling probability assigned based on its class
                data_ids = torch.multinomial(weights,
                                             num_data_per_client,
                                             replacement=False)

                train_data = [self.train_data[j] for j in data_ids]
                # print(f"Client {i} has {len(train_data)} data points.")
                pbar.set_postfix({'# data / Client': len(train_data)})

                assigned_ids += data_ids.tolist()

                torch.save(train_data, client_data_path / str(i) / "data.pth")

                if show_plots:
                    self._plot(train_data,
                               title=f"Client {i+1} Data Distribution")
        elif mode == 2:  # Non-IID Unbalanced
            num_data_per_client = self.num_train_data // num_clients
            num_data_per_class = self.num_train_data / (self.num_classes *
                                                        num_clients)
            class_sampler = Dirichlet(
                torch.empty(self.num_classes).fill_(kwargs.get('dir_alpha')))

            assigned_ids = []
            pbar = tqdm(range(num_clients),
                        desc=f"{self.dataset_name} Non-IID Unbalanced: ")

            if not isinstance(self.train_data.targets, torch.Tensor):
                self.train_data.targets = torch.tensor(self.train_data.targets)

            for i in pbar:
                train_data = []
                client_path = Path(client_data_path / str(i))
                client_path.mkdir()
                # Compute class prior probabilities for each client
                # Share of the jth class for the ith client (always sums to 1)
                p_ij = class_sampler.sample()
                c_sampler = Categorical(p_ij)
                # Per-class sample counts follow a log-normal centred on the
                # average number of data points per class
                data_sampler = LogNormal(
                    torch.tensor(num_data_per_class).log(),
                    kwargs.get('lognorm_std'))

                while True:
                    num_data_left = num_data_per_client - len(train_data)
                    c = c_sampler.sample()
                    num_data_c = int(data_sampler.sample())
                    data_ids = torch.nonzero(
                        self.train_data.targets == c.item()).flatten()
                    # data_ids = [x for x in data_ids if x not in assigned_ids] # Remove duplicated ids
                    num_data_c = min(num_data_c, data_ids.shape[0])
                    if num_data_c >= num_data_left:
                        train_data += [
                            self.train_data[j]
                            for j in data_ids[:num_data_left]
                        ]
                        break
                    else:
                        train_data += [
                            self.train_data[j] for j in data_ids[:num_data_c]
                        ]
                        assigned_ids += data_ids[:num_data_c].tolist()

                pbar.set_postfix({'# data / Client': len(train_data)})
                torch.save(train_data, client_data_path / str(i) / "data.pth")
                if show_plots:
                    self._plot(train_data,
                               title=f"Client {i+1} Data Distribution")

        else:
            raise ValueError("Unknown mode. Mode must be one of {0, 1, 2}")
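
A hedged usage sketch; the class name `DatasetPreparer` and its constructor arguments are assumptions, since the snippet shows only the make method of the AntixK/FedDyn data-preparation class:

preparer = DatasetPreparer(root_dir="./data")            # hypothetical constructor
preparer.make(mode=1, num_clients=100, dir_alpha=0.5)    # Non-IID Balanced split
preparer.make(mode=2, num_clients=100, dir_alpha=0.5, lognorm_std=0.3)  # Non-IID Unbalanced split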