def load_task(self, task_name, load_path):
    """Load a previously saved task's train/val/test splits from disk.

    Expects three files named ``{task_name}_{split}.pth`` (each holding an
    ``(xs, ys)`` pair loadable by ``torch.load``) plus an optional
    ``{task_name}.meta`` file whose contents are forwarded to ``Task`` as
    keyword arguments.

    :param task_name: base name used to build the per-split file names.
    :param load_path: directory containing the split and metadata files.
    :return: the newly constructed Task, already appended to ``self.task_pool``.
    :raises FileNotFoundError: if any split file is missing.
    """
    splits = ['train', 'val', 'test']
    samples = []
    save_paths = []
    for split in splits:
        file_path = os.path.join(load_path, '{}_{}.pth'.format(task_name, split))
        save_paths.append(file_path)
        # Raise explicitly instead of `assert`: asserts are stripped under
        # `python -O`, which would let a missing file fail later and less clearly.
        if not os.path.isfile(file_path):
            raise FileNotFoundError(file_path)
        xs, ys = torch.load(file_path)
        samples.append((xs, ys))
    # Metadata is optional; absent file simply means no extra Task kwargs.
    metadata_file = os.path.join(load_path, '{}.meta'.format(task_name))
    if os.path.isfile(metadata_file):
        meta = torch.load(metadata_file)
    else:
        meta = {}
    # `loss` is a module-level name in this file; `id` is the task's index
    # in the pool at insertion time.
    task = Task(task_name, samples, loss, split_names=self.split_names,
                id=len(self.task_pool), **meta)
    task.save_path = save_paths
    self.task_pool.append(task)
    self.contains_loaded_tasks = True
    return task
def _create_task(self, task_spec, name, save_path):
    """Materialize a Task described by *task_spec*.

    Draws samples via ``self.get_samples`` from the spec's source concepts,
    attributes and transformation, optionally flattens each sample tensor to
    2-D when ``self.flatten`` is set, and wraps everything in a ``Task``
    carrying provenance metadata (creator strategy, generator, spec fields).

    :param task_spec: spec object exposing ``src_concepts``, ``attributes``,
        ``transformation`` and ``n_samples_per_class``.
    :param name: name given to the new Task.
    :param save_path: path stored on the Task for later persistence.
    :return: the freshly built Task (not registered anywhere here).
    """
    spec_concepts = task_spec.src_concepts
    spec_attributes = task_spec.attributes
    spec_transform = task_spec.transformation
    per_class = task_spec.n_samples_per_class

    samples = self.get_samples(spec_concepts, spec_attributes,
                               spec_transform, per_class)
    if self.flatten:
        # Collapse every non-batch dimension so each x is (N, features).
        flat = []
        for x, y in samples:
            flat.append((x.view(x.size(0), -1), y))
        samples = flat

    return Task(name, samples, loss, spec_transform, self.split_names,
                source_concepts=spec_concepts,
                attributes=spec_attributes,
                creator=self.strat.descr(),
                generator=self,
                n_samples_per_class=per_class,
                save_path=save_path)