def test_online_Fellowship_mixeddatatype(dataset10c, types, error):
    """Build a fellowship from copies of one dataset with mixed data types.

    When `error` is truthy, mixing these types must raise a ValueError;
    otherwise construction must succeed.
    """
    def _clone_with_type(data_type):
        # Work on a deep copy so the shared fixture is never mutated.
        clone = copy.deepcopy(dataset10c)
        clone._data_type = data_type
        clone._nb_classes = 10
        return clone

    cl_datasets = [_clone_with_type(t) for t in types]

    if error:
        with pytest.raises(ValueError):
            OnlineFellowship(cl_datasets)
    else:
        OnlineFellowship(cl_datasets)
def test_online_Fellowship_transformation2(dataset7c, dataset10c, dataset20c):
    """Per-task transformation lists: each taskset must yield images resized
    to that task's own target size."""
    target_sizes = [16, 24, 40]
    per_task_trsfs = [
        [trsf.Resize(size=(s, s)), trsf.ToTensor()] for s in target_sizes
    ]

    scenario = OnlineFellowship(
        [dataset7c, dataset10c, dataset20c], transformations=per_task_trsfs
    )

    for expected_size, taskset in zip(target_sizes, scenario):
        batch, _, _ = next(iter(DataLoader(taskset)))
        assert batch.shape[-1] == expected_size
def test_online_Fellowship_transformation(dataset7c, dataset10c, dataset20c, transformations):
    """A fellowship of three datasets has three tasks, every taskset is
    loadable, and the per-task class counts sum to the scenario total."""
    scenario = OnlineFellowship(
        [dataset7c, dataset10c, dataset20c], transformations=transformations
    )
    assert len(scenario) == 3

    total_classes = 0
    for taskset in scenario:
        total_classes += taskset.nb_classes
        # Pull one batch to make sure the transformations apply cleanly.
        next(iter(DataLoader(taskset)))

    assert total_classes == scenario.nb_classes
def test_Online_Fellowship(dataset7c, dataset10c, dataset20c):
    """Each sub-dataset keeps its own class count, both when iterating the
    scenario and when indexing it directly."""
    expected = {0: 7, 1: 10, 2: 20}
    scenario = OnlineFellowship([dataset7c, dataset10c, dataset20c])

    for task_id, taskset in enumerate(scenario):
        assert taskset.nb_classes == expected[task_id]

    for task_id, nb_classes in expected.items():
        assert scenario[task_id].nb_classes == nb_classes
def test_online_Fellowship_mix_path_array(list_datasets):
    """Instantiate every dataset class with identical constructor arguments
    and check the fellowship exposes one task per dataset, with class counts
    summing to the scenario total."""
    ctor_kwargs = {"data_path": DATA_PATH, "train": True, "download": False}
    instantiated = [dataset_cls(**ctor_kwargs) for dataset_cls in list_datasets]

    scenario = OnlineFellowship(instantiated, update_labels=True)
    assert len(scenario) == len(list_datasets)

    total_classes = 0
    for taskset in scenario:
        total_classes += taskset.nb_classes
        # One batch per taskset to verify the data is actually loadable.
        next(iter(DataLoader(taskset)))

    assert total_classes == scenario.nb_classes
def test_Online_Fellowship_subscenarios(dataset7c, dataset10c, dataset20c):
    """create_subscenario on an OnlineFellowship supports both dropping a
    task and shuffling the task order, and the result stays fully loadable."""
    scenario = OnlineFellowship([dataset7c, dataset10c, dataset20c])

    def _exhaust(sub):
        # Iterate every batch of every taskset to prove the data loads.
        for taskset in sub:
            for _ in DataLoader(taskset):
                pass

    # Drop the last task.
    truncated = create_subscenario(scenario, np.arange(scenario.nb_tasks - 1))
    _exhaust(truncated)
    assert truncated.nb_tasks == scenario.nb_tasks - 1

    # Shuffle all tasks.
    order = np.arange(scenario.nb_tasks)
    np.random.shuffle(order)
    reordered = create_subscenario(scenario, order)
    _exhaust(reordered)
    assert reordered.nb_tasks == scenario.nb_tasks
def create_subscenario(base_scenario, task_indexes):
    """Create a sub-scenario from ``base_scenario`` by subsampling tasks,
    reordering them, or both.

    :param base_scenario: scenario from which the subscenario will be created.
    :param task_indexes: array of task indices giving the new task order;
        tasks in the result are renumbered 0..len(task_indexes)-1.
    :return: a new scenario (``OnlineFellowship`` or ``ContinualScenario``)
        containing only the selected tasks.
    :raises ValueError: if the base scenario uses bounding boxes (unsupported).
    """
    if torch.is_tensor(task_indexes):
        task_indexes = task_indexes.numpy()

    # Per-task transformations (a list of lists) must be re-ordered alongside
    # the tasks; a single shared transformation list is passed through as-is.
    if base_scenario.transformations is not None and isinstance(
            base_scenario.transformations[0], list):
        transformations = [base_scenario.transformations[i] for i in task_indexes]
    else:
        transformations = base_scenario.transformations

    if isinstance(base_scenario, OnlineFellowship):
        # We just want to change the order of base_scenario.cl_datasets.
        new_cl_datasets = [base_scenario.cl_datasets[i] for i in task_indexes]
        return OnlineFellowship(new_cl_datasets,
                                transformations=transformations,
                                update_labels=base_scenario.update_labels)

    if base_scenario.cl_dataset.data_type == TaskType.H5:
        # H5-backed data: wrap the selected tasksets directly, no copy.
        list_taskset = [base_scenario[i] for i in task_indexes]
        return OnlineFellowship(list_taskset,
                                transformations=transformations,
                                update_labels=False)

    if base_scenario.cl_dataset.bounding_boxes is not None:
        raise ValueError(
            "the function create_subscenario is not compatible with scenario with bounding_boxes yet."
        )

    # In-memory case: gather the raw samples of each selected task,
    # renumbering task ids to 0..k-1.  Arrays are accumulated in lists and
    # concatenated once at the end, avoiding the quadratic cost of calling
    # np.concatenate inside the loop.
    xs, ys, ts = [], [], []
    for new_task_id, index in enumerate(task_indexes):
        taskset = base_scenario[index]
        all_task_indexes = np.arange(len(taskset))
        # The original task ids are discarded: tasks are renumbered below.
        x, y, _ = taskset.get_raw_samples(all_task_indexes)
        xs.append(x)
        ys.append(y)
        ts.append(np.ones(len(y)) * new_task_id)

    dataset = InMemoryDataset(np.concatenate(xs, axis=0),
                              np.concatenate(ys, axis=0),
                              np.concatenate(ts, axis=0),
                              data_type=base_scenario.cl_dataset.data_type)
    return ContinualScenario(dataset, transformations=transformations)