import torch

# Assumed module-level imports: `create_onehot` and `compute_accuracy` are the
# project's own utilities (in Dassl-style codebases these live in modules such
# as dassl.metrics; the exact import paths depend on the repo layout).

def parse_batch_train(self, batch_x, batch_u):
    input_x = batch_x['img'][0]
    label_x = batch_x['label']
    label_x = create_onehot(label_x, self.num_classes)
    input_u = batch_u['img']

    input_x = input_x.to(self.device)
    label_x = label_x.to(self.device)
    input_u = [input_ui.to(self.device) for input_ui in input_u]

    return input_x, label_x, input_u
def parse_batch_train(self, batch):
    input = batch['img']
    input2 = batch['img2']
    label = batch['label']
    domain = batch['domain']
    label = create_onehot(label, self.num_classes)

    input = input.to(self.device)
    input2 = input2.to(self.device)
    label = label.to(self.device)

    return input, input2, label, domain
def parse_batch_train(self, batch_x, batch_u):
    input_x = batch_x['img']
    input_x2 = batch_x['img2']
    label_x = batch_x['label']
    domain_x = batch_x['domain']
    input_u = batch_u['img']
    input_u2 = batch_u['img2']

    label_x = create_onehot(label_x, self.num_classes)

    input_x = input_x.to(self.device)
    input_x2 = input_x2.to(self.device)
    label_x = label_x.to(self.device)
    input_u = input_u.to(self.device)
    input_u2 = input_u2.to(self.device)

    return input_x, input_x2, label_x, domain_x, input_u, input_u2
def parse_batch_train(self, batch_x, batch_u):
    input_x = batch_x['img']
    input_x2 = batch_x['img2']
    label_x = batch_x['label']
    domain_x = batch_x['domain']
    input_u = batch_u['img']
    input_u2 = batch_u['img2']

    if self.is_regressive:
        # Stack the list of per-target tensors into a (B, num_targets) tensor
        label_x = torch.cat([torch.unsqueeze(x, 1) for x in label_x], 1)
    else:
        label_x = create_onehot(label_x, self.num_classes)

    input_x = input_x.to(self.device)
    input_x2 = input_x2.to(self.device)
    label_x = label_x.to(self.device)
    input_u = input_u.to(self.device)
    input_u2 = input_u2.to(self.device)

    return input_x, input_x2, label_x, domain_x, input_u, input_u2
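# For reference, a minimal sketch of the behaviour assumed of `create_onehot`
# in the parsers above. `_create_onehot_sketch` is a hypothetical stand-in,
# not the project's helper, which may differ in details such as dtype and
# device placement:
def _create_onehot_sketch(label, num_classes):
    # label: (B,) tensor of integer class indices -> (B, num_classes) one-hot
    onehot = torch.zeros(label.shape[0], num_classes)
    onehot[torch.arange(label.shape[0]), label.long()] = 1
    return onehot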
def forward_backward(self, batch_x, batch_u):
    # Load data
    parsed_data = self.parse_batch_train(batch_x, batch_u)
    input_x, input_x2, label_x, domain_x, input_u, input_u2 = parsed_data
    input_x = torch.split(input_x, self.split_batch, 0)
    input_x2 = torch.split(input_x2, self.split_batch, 0)
    label_x = torch.split(label_x, self.split_batch, 0)
    domain_x = torch.split(domain_x, self.split_batch, 0)
    domain_x = [d[0].item() for d in domain_x]

    # x = data with small augmentations; x2 = data with large augmentations.
    # Both views correspond to the same datapoints. Same scheme for u and u2.

    # Generate pseudo labels
    with torch.no_grad():
        # Predictions of every source-domain expert on the unlabeled batch
        feat_u = self.F(input_u)
        pred_u = []
        for k in range(self.dm.num_source_domains):
            pred_uk = self.E(k, feat_u)
            pred_uk = pred_uk.unsqueeze(1)
            pred_u.append(pred_uk)
        pred_u = torch.cat(pred_u, 1)  # (B, K, C)

        # Pseudo label = prediction of the expert selected by the filter
        u_filter = self.G(feat_u)  # (B, K)
        # (B,): 1 if at least one expert score passes the threshold, 0 otherwise
        label_u_mask = (u_filter.max(1)[0] >= self.conf_thre).float()

        # Harden the filter into a one-hot choice of the top-scoring expert
        new_u_filter = torch.zeros(*u_filter.shape).to(self.device)
        for i, row in enumerate(u_filter):
            j_max = row.max(0)[1]
            new_u_filter[i, j_max] = 1
        u_filter = new_u_filter

        d_closest = self.d_closest(u_filter).max(0)[1]

        u_filter = u_filter.unsqueeze(2).expand(*pred_u.shape)
        pred_fu = (pred_u * u_filter).sum(1)  # Zero out all non-chosen experts
        pseudo_label_u = pred_fu.max(1)[1]  # (B)
        pseudo_label_u = create_onehot(pseudo_label_u, self.num_classes)
        pseudo_label_u = pseudo_label_u.to(self.device)

    # Init losses
    loss_x = 0
    loss_cr = 0
    acc_x = 0
    loss_filter = 0
    acc_filter = 0

    # Supervised and unsupervised features
    feat_x = [self.F(x) for x in input_x]
    feat_x2 = [self.F(x) for x in input_x2]
    feat_u2 = self.F(input_u2)

    for feat_xi, feat_x2i, label_xi, i in zip(feat_x, feat_x2, label_x, domain_x):
        cr_s = [j for j in domain_x if j != i]

        # Learning expert
        pred_xi = self.E(i, feat_xi)
        expert_label_xi = pred_xi.detach()
        if self.is_regressive:
            loss_x += ((pred_xi - label_xi)**2).sum(1).mean()
        else:
            loss_x += (-label_xi * torch.log(pred_xi + 1e-5)).sum(1).mean()
            acc_x += compute_accuracy(pred_xi.detach(),
                                      label_xi.max(1)[1])[0].item()

        # The filter must output 1 for the sample's own expert, 0 otherwise
        x_filter = self.G(feat_xi)
        filter_label = torch.zeros(len(domain_x)).to(self.device)
        filter_label[i] = 1
        filter_label = filter_label.unsqueeze(0).expand(*x_filter.shape)
        loss_filter += (-filter_label * torch.log(x_filter + 1e-5)).sum(1).mean()
        acc_filter += compute_accuracy(x_filter.detach(),
                                       filter_label.max(1)[1])[0].item()

        # Consistency regularization: the mean prediction of the non-expert
        # heads must follow the leading expert
        cr_pred = []
        for j in cr_s:
            pred_j = self.E(j, feat_x2i)
            pred_j = pred_j.unsqueeze(1)
            cr_pred.append(pred_j)
        cr_pred = torch.cat(cr_pred, 1).mean(1)
        loss_cr += ((cr_pred - expert_label_xi)**2).sum(1).mean()

    loss_x /= self.n_domain
    loss_cr /= self.n_domain
    if not self.is_regressive:
        acc_x /= self.n_domain
    loss_filter /= self.n_domain
    acc_filter /= self.n_domain

    # Unsupervised loss: the ensemble mean must match the pseudo label
    pred_u = []
    for k in range(self.dm.num_source_domains):
        pred_uk = self.E(k, feat_u2)
        pred_uk = pred_uk.unsqueeze(1)
        pred_u.append(pred_uk)
    pred_u = torch.cat(pred_u, 1).to(self.device)
    pred_u = pred_u.mean(1)
    if self.is_regressive:
        # Squared error for regression; kept per-sample so the mask applies
        l_u = ((pseudo_label_u - pred_u)**2).sum(1)
    else:
        # Cross-entropy against the one-hot pseudo label
        l_u = (-pseudo_label_u * torch.log(pred_u + 1e-5)).sum(1)
    loss_u = (l_u * label_u_mask).mean()

    loss = 0
    loss += loss_x
    loss += loss_cr
    loss += loss_filter
    loss += loss_u * self.weight_u
    self.model_backward_and_update(loss)

    loss_summary = {
        'loss_x': loss_x.item(),
        'loss_filter': loss_filter.item(),
        'acc_filter': acc_filter,
        'loss_cr': loss_cr.item(),
        'loss_u': loss_u.item(),
        'd_closest': d_closest.item()
    }
    if not self.is_regressive:
        loss_summary['acc_x'] = acc_x

    if (self.batch_idx + 1) == self.num_batches:
        self.update_lr()

    return loss_summary
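# The per-row loop that hardens `u_filter` in forward_backward above can be
# expressed in a single vectorized call. `_harden_filter_sketch` is a
# hypothetical equivalent, shown only to clarify what the loop computes:
import torch.nn.functional as F

def _harden_filter_sketch(u_filter):
    # u_filter: (B, K) filter scores -> (B, K) one-hot on the argmax expert
    return F.one_hot(u_filter.argmax(1), num_classes=u_filter.shape[1]).float()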
def forward_backward(self, batch_x, batch_u):
    parsed_data = self.parse_batch_train(batch_x, batch_u)
    input_x, input_x2, label_x, domain_x, input_u, input_u2 = parsed_data
    input_x = torch.split(input_x, self.split_batch, 0)
    input_x2 = torch.split(input_x2, self.split_batch, 0)
    label_x = torch.split(label_x, self.split_batch, 0)
    domain_x = torch.split(domain_x, self.split_batch, 0)
    domain_x = [d[0].item() for d in domain_x]

    # Generate pseudo labels
    with torch.no_grad():
        feat_u = self.F(input_u)
        pred_u = []
        for k in range(self.dm.num_source_domains):
            pred_uk = self.E(k, feat_u)
            pred_uk = pred_uk.unsqueeze(1)
            pred_u.append(pred_uk)
        pred_u = torch.cat(pred_u, 1)  # (B, K, C)
        # Get the highest probability and its index (label) for each expert
        experts_max_p, experts_max_idx = pred_u.max(2)  # (B, K)
        # Get the most confident expert
        max_expert_p, max_expert_idx = experts_max_p.max(1)  # (B)
        pseudo_label_u = []
        for i, experts_label in zip(max_expert_idx, experts_max_idx):
            pseudo_label_u.append(experts_label[i])
        pseudo_label_u = torch.stack(pseudo_label_u, 0)
        pseudo_label_u = create_onehot(pseudo_label_u, self.num_classes)
        pseudo_label_u = pseudo_label_u.to(self.device)
        # Keep only samples whose most confident expert passes the threshold
        label_u_mask = (max_expert_p >= self.conf_thre).float()

    loss_x = 0
    loss_cr = 0
    acc_x = 0

    feat_x = [self.F(x) for x in input_x]
    feat_x2 = [self.F(x) for x in input_x2]
    feat_u2 = self.F(input_u2)

    for feat_xi, feat_x2i, label_xi, i in zip(feat_x, feat_x2, label_x, domain_x):
        cr_s = [j for j in domain_x if j != i]

        # Learning expert
        pred_xi = self.E(i, feat_xi)
        loss_x += (-label_xi * torch.log(pred_xi + 1e-5)).sum(1).mean()
        expert_label_xi = pred_xi.detach()
        acc_x += compute_accuracy(pred_xi.detach(),
                                  label_xi.max(1)[1])[0].item()

        # Consistency regularization
        cr_pred = []
        for j in cr_s:
            pred_j = self.E(j, feat_x2i)
            pred_j = pred_j.unsqueeze(1)
            cr_pred.append(pred_j)
        cr_pred = torch.cat(cr_pred, 1)
        cr_pred = cr_pred.mean(1)
        loss_cr += ((cr_pred - expert_label_xi)**2).sum(1).mean()

    loss_x /= self.n_domain
    loss_cr /= self.n_domain
    acc_x /= self.n_domain

    # Unsupervised loss
    pred_u = []
    for k in range(self.dm.num_source_domains):
        pred_uk = self.E(k, feat_u2)
        pred_uk = pred_uk.unsqueeze(1)
        pred_u.append(pred_uk)
    pred_u = torch.cat(pred_u, 1)
    pred_u = pred_u.mean(1)
    l_u = (-pseudo_label_u * torch.log(pred_u + 1e-5)).sum(1)
    loss_u = (l_u * label_u_mask).mean()

    loss = 0
    loss += loss_x
    loss += loss_cr
    loss += loss_u * self.weight_u
    self.model_backward_and_update(loss)

    loss_summary = {
        'loss_x': loss_x.item(),
        'acc_x': acc_x,
        'loss_cr': loss_cr.item(),
        'loss_u': loss_u.item()
    }

    if (self.batch_idx + 1) == self.num_batches:
        self.update_lr()

    return loss_summary
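# The pseudo-label selection loop above (pick, for each sample, the label
# predicted by its most confident expert) can also be written with
# `torch.gather`. This is a hypothetical equivalent sketch, not part of the
# original trainer:
def _select_pseudo_labels_sketch(experts_max_idx, max_expert_idx):
    # experts_max_idx: (B, K) per-expert argmax labels
    # max_expert_idx:  (B,)   index of the most confident expert per sample
    return experts_max_idx.gather(1, max_expert_idx.unsqueeze(1)).squeeze(1)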