def test_step(self, data_dict, n_support, n_classes, n_query, n_unlabeled=0, n_episodes=1000):
    """Evaluate the model over randomly sampled few-shot episodes.

    Args:
        data_dict: mapping from class label to its list of raw samples
            (presumably label -> list of sentences; verify against caller).
        n_support: number of support examples per class.
        n_classes: number of classes per episode (the N-way).
        n_query: number of query examples per class.
        n_unlabeled: number of unlabeled examples per episode; when non-zero
            the soft k-means loss is used instead of the plain loss.
        n_episodes: number of episodes to average metrics over.

    Returns:
        dict with the mean "loss" and mean "acc" across all episodes.
    """
    accuracies = []
    losses = []
    # NOTE(review): eval mode is not restored on exit — callers must call
    # self.train() again before resuming training.
    self.eval()
    for _ in range(n_episodes):
        episode = create_episode(
            data_dict=data_dict,
            n_support=n_support,
            n_classes=n_classes,
            n_query=n_query,
            n_unlabeled=n_unlabeled,
        )
        with torch.no_grad():
            # Semi-supervised episodes refine prototypes via soft k-means.
            if n_unlabeled:
                loss, loss_dict = self.loss_softkmeans(episode)
            else:
                loss, loss_dict = self.loss(episode)
        accuracies.append(loss_dict["acc"])
        losses.append(loss_dict["loss"])
    return {"loss": np.mean(losses), "acc": np.mean(accuracies)}
def train_step(self, optimizer, data_dict: Dict[str, List[str]], n_support, n_classes, n_query):
    """Sample one supervised episode and apply a single gradient update.

    Args:
        optimizer: torch optimizer wrapping this model's parameters.
        data_dict: mapping from class label to its list of raw samples.
        n_support: support examples per class.
        n_classes: classes per episode.
        n_query: query examples per class.

    Returns:
        Tuple of (loss tensor, loss_dict) as produced by ``self.loss``.
    """
    sampled_episode = create_episode(
        data_dict=data_dict,
        n_support=n_support,
        n_classes=n_classes,
        n_query=n_query,
    )
    self.train()
    optimizer.zero_grad()
    # Free cached GPU memory before the forward pass.
    torch.cuda.empty_cache()
    loss, loss_dict = self.loss(sampled_episode)
    loss.backward()
    optimizer.step()
    return loss, loss_dict
def test_step(self, data_dict, n_support, n_classes, n_query, n_episodes=1000):
    """Evaluate the model over sampled episodes, averaging every reported metric.

    Args:
        data_dict: mapping from class label to its list of raw samples.
        n_support: number of support examples per class.
        n_classes: number of classes per episode (the N-way).
        n_query: number of query examples per class.
        n_episodes: number of episodes to average over.

    Returns:
        dict mapping each metric name found in ``loss_dict["metrics"]``
        to its mean value across all episodes.
    """
    metrics = collections.defaultdict(list)
    # NOTE(review): eval mode is not restored on exit — callers must call
    # self.train() again before resuming training.
    self.eval()
    for _ in range(n_episodes):
        episode = create_episode(
            data_dict=data_dict,
            n_support=n_support,
            n_classes=n_classes,
            n_query=n_query,
        )
        with torch.no_grad():
            # The loss tensor itself is unused here; only the per-episode
            # metrics dict is accumulated.
            loss, loss_dict = self.loss(episode)
        for key, value in loss_dict["metrics"].items():
            metrics[key].append(value)
    return {key: np.mean(values) for key, values in metrics.items()}
def train_step(self, optimizer, data_dict: Dict[str, List[str]], n_support: int, n_classes: int, n_query: int, n_unlabeled: int = 0):
    """Sample one (optionally semi-supervised) episode and apply a gradient update.

    ``n_unlabeled`` now defaults to 0 (fully supervised), matching the
    ``test_step`` signature; existing positional callers are unaffected.

    Args:
        optimizer: torch optimizer wrapping this model's parameters.
        data_dict: mapping from class label to its list of raw samples.
        n_support: support examples per class.
        n_classes: classes per episode.
        n_query: query examples per class.
        n_unlabeled: unlabeled examples per episode; when non-zero the
            soft k-means loss is used instead of the plain loss.

    Returns:
        Tuple of (loss tensor, loss_dict) from the chosen loss function.
    """
    episode = create_episode(
        data_dict=data_dict,
        n_support=n_support,
        n_classes=n_classes,
        n_query=n_query,
        n_unlabeled=n_unlabeled,
    )
    self.train()
    optimizer.zero_grad()
    # Free cached GPU memory before the forward pass.
    torch.cuda.empty_cache()
    # Semi-supervised episodes refine prototypes via soft k-means.
    if n_unlabeled:
        loss, loss_dict = self.loss_softkmeans(episode)
    else:
        loss, loss_dict = self.loss(episode)
    loss.backward()
    optimizer.step()
    return loss, loss_dict