def main(args):
    # Config
    device = torch.device(
        f"cuda:{args.cuda}"
        if torch.cuda.is_available() and args.cuda >= 0
        else "cpu"
    )
    # model
    model = SimpleMLP(input_size=32 * 32 * 3, num_classes=10)

    # CL Benchmark Creation
    scenario = SplitCIFAR10(n_experiences=5, return_task_id=True)
    train_stream = scenario.train_stream
    test_stream = scenario.test_stream

    # Prepare for training & testing
    optimizer = Adam(model.parameters(), lr=0.01)
    criterion = CrossEntropyLoss()

    # Choose a CL strategy
    strategy = Naive(
        model=model,
        optimizer=optimizer,
        criterion=criterion,
        train_mb_size=128,
        train_epochs=3,
        eval_mb_size=128,
        device=device,
    )

    # train and test loop
    for train_task in train_stream:
        strategy.train(train_task, num_workers=0)
        strategy.eval(test_stream)
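Each `main(args)` entry point in this section expects an argparse namespace exposing the fields it reads (here, `args.cuda`). A minimal runner sketch, assuming a single hypothetical `--cuda` flag; the flag name and default are illustrative, not taken from the original scripts:

# Hypothetical runner for the example above; only `args.cuda` is
# actually required by main().
import argparse

if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    parser.add_argument(
        "--cuda", type=int, default=0,
        help="GPU index to use; any value < 0 selects the CPU.",
    )
    main(parser.parse_args())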
def main(cuda: int):
    # --- CONFIG
    device = torch.device(f"cuda:{cuda}" if torch.cuda.is_available() else "cpu")
    # --- SCENARIO CREATION
    scenario = SplitCIFAR10(n_experiences=2, seed=42)
    # ---------

    # MODEL CREATION
    # 196608 // 64 == 3072 == 32 * 32 * 3 (flattened CIFAR-10 image)
    model = SimpleMLP(num_classes=scenario.n_classes, input_size=196608 // 64)

    # choose some metrics and evaluation method
    eval_plugin = EvaluationPlugin(
        accuracy_metrics(stream=True, experience=True),
        images_samples_metrics(
            on_train=True,
            on_eval=True,
            n_cols=10,
            n_rows=10,
        ),
        labels_repartition_metrics(
            # image_creator=repartition_bar_chart_image_creator,
            on_train=True,
            on_eval=True,
        ),
        loggers=[
            TensorboardLogger(f"tb_data/{datetime.now()}"),
            InteractiveLogger(),
        ],
    )

    # CREATE THE STRATEGY INSTANCE (NAIVE)
    cl_strategy = Naive(
        model,
        Adam(model.parameters()),
        train_mb_size=128,
        train_epochs=1,
        eval_mb_size=128,
        device=device,
        plugins=[ReplayPlugin(mem_size=1_000)],
        evaluator=eval_plugin,
    )

    # TRAINING LOOP
    for i, experience in enumerate(scenario.train_stream, 1):
        cl_strategy.train(experience)
        # evaluate only on the experiences seen so far
        cl_strategy.eval(scenario.test_stream[:i])
def test_no_errors(self):
    eval_plugin = EvaluationPlugin(
        accuracy_metrics(stream=True),
        loggers=None,
        benchmark=self.benchmark,
        strict_checks=True,
    )
    strategy = Naive(
        self.model,
        self.optimizer,
        self.criterion,
        train_epochs=2,
        eval_every=0,
        evaluator=eval_plugin,
    )
    for exp in self.benchmark.train_stream:
        strategy.train(exp, eval_streams=[self.benchmark.test_stream])
        strategy.eval(self.benchmark.test_stream)
def _test_integration(self, module, clf_name, plugins=None):
    # avoid the mutable default argument
    if plugins is None:
        plugins = []
    module = as_multitask(module, clf_name)
    module = module.to(self.device)
    optimizer = SGD(
        module.parameters(), lr=0.05, momentum=0.9, weight_decay=0.0002
    )
    strategy = Naive(
        module,
        optimizer,
        train_mb_size=32,
        eval_mb_size=32,
        device=self.device,
        plugins=plugins,
    )
    for t, experience in enumerate(self.benchmark.train_stream):
        strategy.train(experience)
        strategy.eval(self.benchmark.test_stream[: t + 1])
def test_raise_warning(self):
    eval_plugin = EvaluationPlugin(
        accuracy_metrics(stream=True),
        loggers=None,
        benchmark=self.benchmark,
        strict_checks=False,
    )
    strategy = Naive(
        self.model,
        self.optimizer,
        self.criterion,
        train_epochs=2,
        eval_every=-1,
        evaluator=eval_plugin,
    )
    for exp in self.benchmark.train_stream:
        strategy.train(exp)
        strategy.eval(self.benchmark.test_stream)
    with self.assertWarns(UserWarning):
        strategy.eval(self.benchmark.test_stream[:2])
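These tests assume `self.benchmark`, `self.model`, `self.optimizer`, and `self.criterion` are prepared in a fixture that is not shown. A minimal `setUp` sketch, assuming a tiny random benchmark built with the same helpers used elsewhere in this section (`AvalancheTensorDataset`, `create_multi_dataset_generic_benchmark`); all sizes and class counts are illustrative:

# Hypothetical fixture; shapes and the choice of benchmark builder are
# assumptions chosen to match the other examples in this section.
def setUp(self):
    train_sets = [
        AvalancheTensorDataset(
            torch.randn(10, 6), torch.randint(0, 3, (10,)).tolist()
        )
        for _ in range(2)
    ]
    test_sets = [
        AvalancheTensorDataset(
            torch.randn(10, 6), torch.randint(0, 3, (10,)).tolist()
        )
        for _ in range(2)
    ]
    self.benchmark = create_multi_dataset_generic_benchmark(
        train_datasets=train_sets, test_datasets=test_sets
    )
    self.model = SimpleMLP(input_size=6, num_classes=3)
    self.optimizer = SGD(self.model.parameters(), lr=0.01)
    self.criterion = CrossEntropyLoss()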
def main(args):
    # Config
    device = torch.device(
        f"cuda:{args.cuda}"
        if torch.cuda.is_available() and args.cuda >= 0
        else "cpu"
    )
    # Model
    model = SimpleCNN(num_classes=5)

    # CL Benchmark Creation
    scenario = EndlessCLSim(
        scenario=args.scenario,  # "Classes", "Illumination", "Weather"
        sequence_order=None,
        task_order=None,
        semseg=args.semseg,
        dataset_root=args.dataset_root,
    )
    train_stream = scenario.train_stream
    test_stream = scenario.test_stream

    # Prepare for training & testing
    optimizer = Adam(model.parameters(), lr=0.001)
    criterion = CrossEntropyLoss()

    # Choose a CL strategy
    strategy = Naive(
        model=model,
        optimizer=optimizer,
        criterion=criterion,
        train_mb_size=64,
        train_epochs=3,
        eval_mb_size=128,
        device=device,
    )

    # Train and test loop
    for train_task in train_stream:
        strategy.train(train_task, num_workers=0)
        strategy.eval(test_stream)
def test_callback_reachability(self):
    # Check that all the callbacks are called during
    # training and test loops.
    model = _PlainMLP(input_size=6, hidden_size=10)
    optimizer = SGD(model.parameters(), lr=1e-3)
    criterion = CrossEntropyLoss()
    benchmark = PluginTests.create_benchmark()

    plug = MockPlugin()
    strategy = Naive(
        model,
        optimizer,
        criterion,
        train_mb_size=100,
        train_epochs=1,
        eval_mb_size=100,
        device="cpu",
        plugins=[plug],
    )
    strategy.evaluator.loggers = [TextLogger(sys.stdout)]
    strategy.train(benchmark.train_stream[0], num_workers=0)
    strategy.eval([benchmark.test_stream[0]], num_workers=0)
    assert all(plug.activated)
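`MockPlugin` is not shown above. A sketch of the idea, assuming the plugin flips one flag per callback in an `activated` list; the `StrategyPlugin` base-class name is an assumption about the Avalanche version in use, and a real test would override the full callback set rather than the two shown here:

# Hypothetical MockPlugin sketch; base class and callback subset are
# assumptions, shown only to illustrate the reachability check.
from avalanche.training.plugins import StrategyPlugin

class MockPlugin(StrategyPlugin):
    def __init__(self):
        super().__init__()
        self.activated = [False, False]

    def before_training(self, strategy, **kwargs):
        self.activated[0] = True

    def after_eval(self, strategy, **kwargs):
        self.activated[1] = True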
def main(args):
    # Device config
    device = torch.device(
        f"cuda:{args.cuda}"
        if torch.cuda.is_available() and args.cuda >= 0
        else "cpu"
    )
    # model
    model = SimpleMLP(num_classes=10)

    # Here we show all the MNIST variations we offer in the "classic" benchmarks
    if args.mnist_type == "permuted":
        scenario = PermutedMNIST(n_experiences=5, seed=1)
    elif args.mnist_type == "rotated":
        scenario = RotatedMNIST(
            n_experiences=5, rotations_list=[30, 60, 90, 120, 150], seed=1
        )
    else:
        scenario = SplitMNIST(n_experiences=5, seed=1)

    # Then we can extract the parallel train and test streams
    train_stream = scenario.train_stream
    test_stream = scenario.test_stream

    # Prepare for training & testing
    optimizer = SGD(model.parameters(), lr=0.001, momentum=0.9)
    criterion = CrossEntropyLoss()

    # Continual learning strategy with default logger
    cl_strategy = Naive(
        model,
        optimizer,
        criterion,
        train_mb_size=32,
        train_epochs=100,
        eval_mb_size=32,
        device=device,
        eval_every=1,
        plugins=[EarlyStoppingPlugin(args.patience, "test_stream")],
    )

    # train and test loop
    results = []
    for train_task, test_task in zip(train_stream, test_stream):
        print("Current Classes: ", train_task.classes_in_this_experience)
        cl_strategy.train(train_task, eval_streams=[test_task])
        results.append(cl_strategy.eval(test_stream))
def main(args):
    # --- CONFIG
    device = torch.device(
        f"cuda:{args.cuda}"
        if torch.cuda.is_available() and args.cuda >= 0
        else "cpu"
    )
    # ---------

    # --- TRANSFORMATIONS
    train_transform = transforms.Compose(
        [
            RandomCrop(28, padding=4),
            ToTensor(),
            transforms.Normalize((0.1307,), (0.3081,)),
        ]
    )
    test_transform = transforms.Compose(
        [ToTensor(), transforms.Normalize((0.1307,), (0.3081,))]
    )
    # ---------

    # --- SCENARIO CREATION
    mnist_train = MNIST(
        root=expanduser("~") + "/.avalanche/data/mnist/",
        train=True,
        download=True,
        transform=train_transform,
    )
    mnist_test = MNIST(
        root=expanduser("~") + "/.avalanche/data/mnist/",
        train=False,
        download=True,
        transform=test_transform,
    )
    scenario = nc_benchmark(
        mnist_train, mnist_test, 5, task_labels=False, seed=1234
    )
    # ---------

    # MODEL CREATION
    model = SimpleMLP(num_classes=scenario.n_classes)

    interactive_logger = InteractiveLogger()
    wandb_logger = WandBLogger(
        project_name=args.project, run_name=args.run, config=vars(args)
    )

    eval_plugin = EvaluationPlugin(
        accuracy_metrics(
            minibatch=True,
            epoch=True,
            epoch_running=True,
            experience=True,
            stream=True,
        ),
        loss_metrics(
            minibatch=True,
            epoch=True,
            epoch_running=True,
            experience=True,
            stream=True,
        ),
        forgetting_metrics(experience=True, stream=True),
        confusion_matrix_metrics(
            stream=True, wandb=True, class_names=[str(i) for i in range(10)]
        ),
        cpu_usage_metrics(
            minibatch=True, epoch=True, experience=True, stream=True
        ),
        timing_metrics(
            minibatch=True, epoch=True, experience=True, stream=True
        ),
        ram_usage_metrics(
            every=0.5, minibatch=True, epoch=True, experience=True, stream=True
        ),
        gpu_usage_metrics(
            args.cuda,
            every=0.5,
            minibatch=True,
            epoch=True,
            experience=True,
            stream=True,
        ),
        disk_usage_metrics(
            minibatch=True, epoch=True, experience=True, stream=True
        ),
        MAC_metrics(minibatch=True, epoch=True, experience=True),
        loggers=[interactive_logger, wandb_logger],
    )

    # CREATE THE STRATEGY INSTANCE (NAIVE)
    cl_strategy = Naive(
        model,
        SGD(model.parameters(), lr=0.001, momentum=0.9),
        CrossEntropyLoss(),
        train_mb_size=100,
        train_epochs=4,
        eval_mb_size=100,
        device=device,
        evaluator=eval_plugin,
    )

    # TRAINING LOOP
    print("Starting experiment...")
    results = []
    for experience in scenario.train_stream:
        print("Start of experience: ", experience.current_experience)
        print("Current Classes: ", experience.classes_in_this_experience)
        cl_strategy.train(experience)
        print("Training completed")

        print("Computing accuracy on the whole test set")
        results.append(cl_strategy.eval(scenario.test_stream))
def main(args):
    # Model getter: specify dataset and depth of the network.
    model = pytorchcv_wrapper.resnet("cifar10", depth=20, pretrained=False)

    # Or get a more specific model. E.g. wide resnet, with depth 40 and growth
    # factor 8 for Cifar 10.
    # model = pytorchcv_wrapper.get_model("wrn40_8_cifar10", pretrained=False)

    # --- CONFIG
    device = torch.device(
        f"cuda:{args.cuda}"
        if torch.cuda.is_available() and args.cuda >= 0
        else "cpu"
    )
    # Uncomment to force CPU execution regardless of the config above:
    # device = "cpu"

    # --- TRANSFORMATIONS
    transform = transforms.Compose(
        [
            ToTensor(),
            transforms.Normalize((0.491, 0.482, 0.446), (0.247, 0.243, 0.261)),
        ]
    )

    # --- SCENARIO CREATION
    cifar_train = CIFAR10(
        root=expanduser("~") + "/.avalanche/data/cifar10/",
        train=True,
        download=True,
        transform=transform,
    )
    cifar_test = CIFAR10(
        root=expanduser("~") + "/.avalanche/data/cifar10/",
        train=False,
        download=True,
        transform=transform,
    )
    scenario = nc_benchmark(
        cifar_train,
        cifar_test,
        5,
        task_labels=False,
        seed=1234,
        fixed_class_order=[i for i in range(10)],
    )

    # choose some metrics and evaluation method
    interactive_logger = InteractiveLogger()
    eval_plugin = EvaluationPlugin(
        accuracy_metrics(minibatch=True, epoch=True, experience=True, stream=True),
        loss_metrics(minibatch=True, epoch=True, experience=True, stream=True),
        forgetting_metrics(experience=True),
        loggers=[interactive_logger],
    )

    # CREATE THE STRATEGY INSTANCE (Naive, with Replay)
    cl_strategy = Naive(
        model,
        torch.optim.SGD(model.parameters(), lr=0.01),
        CrossEntropyLoss(),
        train_mb_size=100,
        train_epochs=1,
        eval_mb_size=100,
        device=device,
        plugins=[ReplayPlugin(mem_size=1000)],
        evaluator=eval_plugin,
    )

    # TRAINING LOOP
    print("Starting experiment...")
    results = []
    for experience in scenario.train_stream:
        print("Start of experience ", experience.current_experience)
        cl_strategy.train(experience)
        print("Training completed")

        print("Computing accuracy on the whole test set")
        results.append(cl_strategy.eval(scenario.test_stream))
def main(args):
    # --- CONFIG
    device = torch.device(
        f"cuda:{args.cuda}"
        if torch.cuda.is_available() and args.cuda >= 0
        else "cpu"
    )
    # ---------

    tr_ds = [
        AvalancheTensorDataset(
            torch.randn(10, 3),
            torch.randint(0, 3, (10,)).tolist(),
            task_labels=torch.randint(0, 5, (10,)).tolist(),
        )
        for _ in range(3)
    ]
    ts_ds = [
        AvalancheTensorDataset(
            torch.randn(10, 3),
            torch.randint(0, 3, (10,)).tolist(),
            task_labels=torch.randint(0, 5, (10,)).tolist(),
        )
        for _ in range(3)
    ]
    scenario = create_multi_dataset_generic_benchmark(
        train_datasets=tr_ds, test_datasets=ts_ds
    )
    # ---------

    # MODEL CREATION
    model = SimpleMLP(num_classes=3, input_size=3)

    # DEFINE THE EVALUATION PLUGIN AND LOGGER
    # The evaluation plugin manages the metrics computation.
    # It takes as argument a list of metrics and a list of loggers.
    # The evaluation plugin calls the loggers to serialize the metrics
    # and save them in persistent memory or print them in the standard output.

    # log to text file
    text_logger = TextLogger(open("log.txt", "a"))

    # print to stdout
    interactive_logger = InteractiveLogger()

    csv_logger = CSVLogger()

    eval_plugin = EvaluationPlugin(
        accuracy_metrics(
            minibatch=True,
            epoch=True,
            epoch_running=True,
            experience=True,
            stream=True,
        ),
        loss_metrics(
            minibatch=True,
            epoch=True,
            epoch_running=True,
            experience=True,
            stream=True,
        ),
        forgetting_metrics(experience=True, stream=True),
        bwt_metrics(experience=True, stream=True),
        cpu_usage_metrics(
            minibatch=True,
            epoch=True,
            epoch_running=True,
            experience=True,
            stream=True,
        ),
        timing_metrics(
            minibatch=True,
            epoch=True,
            epoch_running=True,
            experience=True,
            stream=True,
        ),
        ram_usage_metrics(
            every=0.5, minibatch=True, epoch=True, experience=True, stream=True
        ),
        gpu_usage_metrics(
            args.cuda,
            every=0.5,
            minibatch=True,
            epoch=True,
            experience=True,
            stream=True,
        ),
        disk_usage_metrics(
            minibatch=True, epoch=True, experience=True, stream=True
        ),
        MAC_metrics(minibatch=True, epoch=True, experience=True),
        loggers=[interactive_logger, text_logger, csv_logger],
        collect_all=True,  # collect all metrics (set to True by default)
    )

    # CREATE THE STRATEGY INSTANCE (NAIVE)
    cl_strategy = Naive(
        model,
        SGD(model.parameters(), lr=0.001, momentum=0.9),
        CrossEntropyLoss(),
        train_mb_size=500,
        train_epochs=1,
        eval_mb_size=100,
        device=device,
        evaluator=eval_plugin,
        eval_every=1,
    )

    # TRAINING LOOP
    print("Starting experiment...")
    results = []
    for i, experience in enumerate(scenario.train_stream):
        print("Start of experience: ", experience.current_experience)
        print("Current Classes: ", experience.classes_in_this_experience)
        # train returns a dictionary containing the last recorded value
        # for each metric.
        res = cl_strategy.train(experience, eval_streams=[scenario.test_stream])
        print("Training completed")

        print("Computing accuracy on the whole test set")
        # eval also returns a dictionary with the last metric collected
        # during evaluation on that stream
        results.append(cl_strategy.eval(scenario.test_stream))

    print(f"Test metrics:\n{results}")

    # Dict with all the metric curves, only available when `collect_all`
    # is True. Each entry is a (x, metric value) tuple.
    # You can use this dictionary to manipulate the metrics without Avalanche.
    all_metrics = cl_strategy.evaluator.get_all_metrics()
    print(f"Stored metrics: {list(all_metrics.keys())}")
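A short sketch of reading one curve back out of `all_metrics`, assuming the stream-accuracy key follows the `Top1_Acc_Stream/eval_phase/test_stream` naming pattern used later in this section; actual key names vary with the benchmark's stream and task layout, so inspect `all_metrics.keys()` first:

# Hypothetical post-processing of the curves collected above; the key
# name is an assumption based on the pattern used elsewhere here.
steps, values = all_metrics["Top1_Acc_Stream/eval_phase/test_stream"]
for x, acc in zip(steps, values):
    print(f"step {x}: stream accuracy {acc:.4f}")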
def main(args):
    # --- CONFIG
    device = torch.device(
        f"cuda:{args.cuda}"
        if torch.cuda.is_available() and args.cuda >= 0
        else "cpu"
    )
    # ---------

    # --- TRANSFORMATIONS
    train_transform = transforms.Compose(
        [
            RandomCrop(28, padding=4),
            ToTensor(),
            transforms.Normalize((0.1307,), (0.3081,)),
        ]
    )
    test_transform = transforms.Compose(
        [ToTensor(), transforms.Normalize((0.1307,), (0.3081,))]
    )
    # ---------

    # --- SCENARIO CREATION
    mnist_train = MNIST(
        root=expanduser("~") + "/.avalanche/data/mnist/",
        train=True,
        download=True,
        transform=train_transform,
    )
    mnist_test = MNIST(
        root=expanduser("~") + "/.avalanche/data/mnist/",
        train=False,
        download=True,
        transform=test_transform,
    )
    scenario = nc_benchmark(
        mnist_train, mnist_test, 5, task_labels=False, seed=1234
    )
    # ---------

    # MODEL CREATION
    model = SimpleMLP(num_classes=scenario.n_classes)

    eval_plugin = EvaluationPlugin(
        accuracy_metrics(epoch=True, experience=True, stream=True),
        loss_metrics(epoch=True, experience=True, stream=True),
        # save_image should be False to view the results appropriately
        # in the InteractiveLogger: a tensor will be printed instead.
        confusion_matrix_metrics(save_image=False, normalize="all", stream=True),
        loggers=InteractiveLogger(),
    )

    # CREATE THE STRATEGY INSTANCE (NAIVE)
    cl_strategy = Naive(
        model,
        SGD(model.parameters(), lr=0.001, momentum=0.9),
        CrossEntropyLoss(),
        train_mb_size=100,
        train_epochs=4,
        eval_mb_size=100,
        device=device,
        evaluator=eval_plugin,
        plugins=[ReplayPlugin(5000)],
    )

    # TRAINING LOOP
    print("Starting experiment...")
    results = []
    for experience in scenario.train_stream:
        print("Start of experience: ", experience.current_experience)
        print("Current Classes: ", experience.classes_in_this_experience)
        cl_strategy.train(experience)
        print("Training completed")

        print("Computing accuracy on the whole test set")
        results.append(cl_strategy.eval(scenario.test_stream))
def main(args):
    # Device config
    device = torch.device(
        f"cuda:{args.cuda}"
        if torch.cuda.is_available() and args.cuda >= 0
        else "cpu"
    )

    # Initialize the model, stream and training strategy
    model = SimpleCNN(num_classes=10)
    if args.stream != "s_long":
        model_init = deepcopy(model)

    scenario = CTrL(
        stream_name=args.stream, save_to_disk=args.save, path=args.path, seed=10
    )

    train_stream = scenario.train_stream
    test_stream = scenario.test_stream
    val_stream = scenario.val_stream

    optimizer = SGD(model.parameters(), lr=0.001, momentum=0.9)
    criterion = CrossEntropyLoss()

    logger = EvaluationPlugin(
        accuracy_metrics(
            minibatch=False, epoch=False, experience=True, stream=True
        ),
        loggers=[InteractiveLogger()],
    )

    cl_strategy = Naive(
        model,
        optimizer,
        criterion,
        train_mb_size=32,
        device=device,
        train_epochs=args.max_epochs,
        eval_mb_size=128,
        evaluator=logger,
        plugins=[EarlyStoppingPlugin(50, "val_stream")],
        eval_every=5,
    )

    # train and test loop
    for train_task, val_task in zip(train_stream, val_stream):
        cl_strategy.train(train_task, eval_streams=[val_task])
        cl_strategy.eval(test_stream)

    transfer_mat = []
    for tid in range(len(train_stream)):
        transfer_mat.append(
            logger.all_metric_results[
                f"Top1_Acc_Exp/eval_phase/test_stream/Task00{tid}/Exp00{tid}"
            ][1]
        )

    if args.stream == "s_long":
        res = logger.last_metric_results[
            "Top1_Acc_Stream/eval_phase/test_stream"
        ]
        print(f"Average accuracy on S_long: {res}")
    else:
        optimizer = SGD(model_init.parameters(), lr=0.001, momentum=0.9)
        cl_strategy = Naive(
            model_init,
            optimizer,
            criterion,
            train_mb_size=32,
            device=device,
            train_epochs=args.max_epochs,
            eval_mb_size=128,
            plugins=[EarlyStoppingPlugin(50, "val_stream")],
            eval_every=5,
        )

        cl_strategy.train(train_stream[-1])
        res = cl_strategy.eval([test_stream[-1]])

        acc_last_stream = transfer_mat[-1][-1]
        acc_last_only = res[
            "Top1_Acc_Exp/eval_phase/test_stream/Task005/Exp-01"
        ]
        transfer_value = acc_last_stream - acc_last_only

        print(
            f"Accuracy on probe task after training on the whole "
            f"stream: {acc_last_stream}"
        )
        print(
            f"Accuracy on probe task after training "
            f"independently: {acc_last_only}"
        )
        print(f"T({args.stream})={transfer_value}")
HYPER_PARAMETER[mode]["step_schedular_decay"], HYPER_PARAMETER[mode]["schedular_step"], ) plugin_list = [LRSchedulerPlugin(scheduler)] cl_strategy = Naive( model, optimizer, CrossEntropyLoss(), train_mb_size=HYPER_PARAMETER[mode]["batch_size"], train_epochs=num_epoch, eval_mb_size=HYPER_PARAMETER[mode]["batch_size"], evaluator=eval_plugin, device=device, plugins=plugin_list, ) # TRAINING LOOP print("Starting experiment...") results = [] print("Current input mode : ", mode) print("Current eval mode : ", eval_mode) for experience in scenario.train_stream: print("Start of experience: ", experience.current_experience) print("Current Classes: ", experience.classes_in_this_experience) res = cl_strategy.train(experience) print("Training completed") print("Computing accuracy on the whole test set with" f" {eval_mode} evaluation protocols") results.append(cl_strategy.eval(scenario.test_stream))
def main(args): """ Last Avalanche version reference performance (online = 1 epoch): Class-incremental (online): Top1_Acc_Stream/eval_phase/test_stream = 0.9421 Data-incremental (online: Top1_Acc_Stream/eval_phase/test_stream = 0.9309 These are reference results for a single run. """ # --- DEFAULT PARAMS ONLINE DATA INCREMENTAL LEARNING nb_tasks = 5 # Can still design the data stream based on tasks batch_size = 10 # Learning agent only has small amount of data available epochs = 1 # How many times to process each mini-batch return_task_id = False # Data incremental (task-agnostic/task-free) # --- CONFIG device = torch.device( f"cuda:{args.cuda}" if torch.cuda.is_available() and args.cuda >= 0 else "cpu" ) # --------- # --- SCENARIO CREATION n_classes = 10 task_scenario = SplitMNIST( nb_tasks, return_task_id=return_task_id, fixed_class_order=[i for i in range(n_classes)], ) # Make data incremental (one batch = one experience) scenario = data_incremental_benchmark( task_scenario, experience_size=batch_size ) print(f"{scenario.n_experiences} batches in online data incremental setup.") # 6002 batches for SplitMNIST with batch size 10 # --------- # MODEL CREATION model = SimpleMLP( num_classes=args.featsize, hidden_size=400, hidden_layers=2, drop_rate=0 ) # choose some metrics and evaluation method logger = TextLogger() eval_plugin = EvaluationPlugin( accuracy_metrics(experience=True, stream=True), loss_metrics(experience=False, stream=True), StreamForgetting(), loggers=[logger], benchmark=scenario, ) # CoPE PLUGIN cope = CoPEPlugin( mem_size=2000, alpha=0.99, p_size=args.featsize, n_classes=n_classes ) # CREATE THE STRATEGY INSTANCE (NAIVE) WITH CoPE PLUGIN cl_strategy = Naive( model, torch.optim.SGD(model.parameters(), lr=0.01), cope.ppp_loss, # CoPE PPP-Loss train_mb_size=batch_size, train_epochs=epochs, eval_mb_size=100, device=device, plugins=[cope], evaluator=eval_plugin, ) # TRAINING LOOP print("Starting experiment...") results = [] cl_strategy.train(scenario.train_stream) print("Computing accuracy on the whole test set") results.append(cl_strategy.eval(scenario.test_stream))
def main(args):
    model = SimpleMLP(hidden_size=args.hs)
    optimizer = torch.optim.Adam(model.parameters(), lr=args.lr)
    criterion = torch.nn.CrossEntropyLoss()

    # check if selected GPU is available or use CPU
    assert args.cuda == -1 or args.cuda >= 0, "cuda must be -1 or >= 0."
    device = torch.device(
        f"cuda:{args.cuda}"
        if torch.cuda.is_available() and args.cuda >= 0
        else "cpu"
    )
    print(f"Using device: {device}")

    # create scenario
    if args.scenario == "pmnist":
        scenario = PermutedMNIST(n_experiences=args.permutations)
    elif args.scenario == "smnist":
        mnist_train = MNIST(
            root=expanduser("~") + "/.avalanche/data/mnist/",
            train=True,
            download=True,
            transform=ToTensor(),
        )
        mnist_test = MNIST(
            root=expanduser("~") + "/.avalanche/data/mnist/",
            train=False,
            download=True,
            transform=ToTensor(),
        )
        scenario = nc_benchmark(
            mnist_train, mnist_test, 5, task_labels=False, seed=1234
        )
    else:
        raise ValueError("Wrong scenario name. Allowed: pmnist, smnist.")

    # choose some metrics and evaluation method
    interactive_logger = InteractiveLogger()
    tensorboard_logger = TensorboardLogger()
    eval_plugin = EvaluationPlugin(
        accuracy_metrics(minibatch=True, epoch=True, experience=True, stream=True),
        loss_metrics(minibatch=True, epoch=True, experience=True, stream=True),
        forgetting_metrics(experience=True, stream=True),
        bwt_metrics(experience=True, stream=True),
        loggers=[interactive_logger, tensorboard_logger],
    )

    # create strategy
    strategy = Naive(
        model,
        optimizer,
        criterion,
        train_epochs=args.epochs,
        device=device,
        train_mb_size=args.minibatch_size,
        evaluator=eval_plugin,
        plugins=[
            RWalkPlugin(
                ewc_lambda=args.ewc_lambda,
                ewc_alpha=args.ewc_alpha,
                delta_t=args.delta_t,
            )
        ],
    )

    # train on the selected scenario with the chosen strategy
    print("Starting experiment...")
    results = []
    for experience in scenario.train_stream:
        print("Start training on experience ", experience.current_experience)
        strategy.train(experience)
        print("End training on experience ", experience.current_experience)

        print("Computing accuracy on the test set")
        results.append(strategy.eval(scenario.test_stream[:]))
def main(args):
    # --- CONFIG
    device = torch.device(
        f"cuda:{args.cuda}"
        if torch.cuda.is_available() and args.cuda >= 0
        else "cpu"
    )
    # ---------

    # --- TRANSFORMATIONS
    train_transform = transforms.Compose(
        [
            RandomCrop(28, padding=4),
            ToTensor(),
            transforms.Normalize((0.1307,), (0.3081,)),
        ]
    )
    test_transform = transforms.Compose(
        [ToTensor(), transforms.Normalize((0.1307,), (0.3081,))]
    )
    # ---------

    # --- SCENARIO CREATION
    mnist_train = MNIST(
        root=expanduser("~") + "/.avalanche/data/mnist/",
        train=True,
        download=True,
        transform=train_transform,
    )
    mnist_test = MNIST(
        root=expanduser("~") + "/.avalanche/data/mnist/",
        train=False,
        download=True,
        transform=test_transform,
    )
    scenario = nc_benchmark(
        mnist_train, mnist_test, 5, task_labels=False, seed=1234
    )
    # ---------

    # MODEL CREATION
    model = SimpleMLP(num_classes=scenario.n_classes)

    # DEFINE THE EVALUATION PLUGIN AND LOGGER
    # The evaluation plugin manages the metrics computation.
    # It takes as argument a list of metrics and a list of loggers.
    # The evaluation plugin calls the loggers to serialize the metrics
    # and save them in persistent memory or print them in the standard output.

    # log to text file
    text_logger = TextLogger(open("log.txt", "a"))

    # print to stdout
    interactive_logger = InteractiveLogger()

    csv_logger = CSVLogger()

    eval_plugin = EvaluationPlugin(
        accuracy_metrics(
            minibatch=True,
            epoch=True,
            epoch_running=True,
            experience=True,
            stream=True,
        ),
        loss_metrics(
            minibatch=True,
            epoch=True,
            epoch_running=True,
            experience=True,
            stream=True,
        ),
        forgetting_metrics(experience=True, stream=True),
        bwt_metrics(experience=True, stream=True),
        forward_transfer_metrics(experience=True, stream=True),
        cpu_usage_metrics(
            minibatch=True,
            epoch=True,
            epoch_running=True,
            experience=True,
            stream=True,
        ),
        timing_metrics(
            minibatch=True,
            epoch=True,
            epoch_running=True,
            experience=True,
            stream=True,
        ),
        ram_usage_metrics(
            every=0.5, minibatch=True, epoch=True, experience=True, stream=True
        ),
        gpu_usage_metrics(
            args.cuda,
            every=0.5,
            minibatch=True,
            epoch=True,
            experience=True,
            stream=True,
        ),
        disk_usage_metrics(
            minibatch=True, epoch=True, experience=True, stream=True
        ),
        MAC_metrics(minibatch=True, epoch=True, experience=True),
        loggers=[interactive_logger, text_logger, csv_logger],
        collect_all=True,  # collect all metrics (set to True by default)
    )

    # CREATE THE STRATEGY INSTANCE (NAIVE)
    cl_strategy = Naive(
        model,
        SGD(model.parameters(), lr=0.001, momentum=0.9),
        CrossEntropyLoss(),
        train_mb_size=500,
        train_epochs=1,
        eval_mb_size=100,
        device=device,
        evaluator=eval_plugin,
        eval_every=1,
    )

    # TRAINING LOOP
    print("Starting experiment...")
    results = []
    for i, experience in enumerate(scenario.train_stream):
        print("Start of experience: ", experience.current_experience)
        print("Current Classes: ", experience.classes_in_this_experience)
        # train returns a dictionary containing the last recorded value
        # for each metric.
        res = cl_strategy.train(experience, eval_streams=[scenario.test_stream])
        print("Training completed")

        print("Computing accuracy on the whole test set")
        # eval also returns a dictionary with the last metric collected
        # during evaluation on that stream
        results.append(cl_strategy.eval(scenario.test_stream))

    print(f"Test metrics:\n{results}")

    # Dict with all the metric curves, only available when `collect_all`
    # is True. Each entry is a (x, metric value) tuple.
    # You can use this dictionary to manipulate the metrics without Avalanche.
    all_metrics = cl_strategy.evaluator.get_all_metrics()
    print(f"Stored metrics: {list(all_metrics.keys())}")
def main(args):
    # --- CONFIG
    device = torch.device(
        f"cuda:{args.cuda}"
        if torch.cuda.is_available() and args.cuda >= 0
        else "cpu"
    )
    n_batches = 5
    # ---------

    # --- TRANSFORMATIONS
    train_transform = transforms.Compose(
        [
            RandomCrop(28, padding=4),
            ToTensor(),
            transforms.Normalize((0.1307,), (0.3081,)),
        ]
    )
    test_transform = transforms.Compose(
        [ToTensor(), transforms.Normalize((0.1307,), (0.3081,))]
    )
    # ---------

    # --- SCENARIO CREATION
    mnist_train = MNIST(
        root=expanduser("~") + "/.avalanche/data/mnist/",
        train=True,
        download=True,
        transform=train_transform,
    )
    mnist_test = MNIST(
        root=expanduser("~") + "/.avalanche/data/mnist/",
        train=False,
        download=True,
        transform=test_transform,
    )
    scenario = nc_benchmark(
        mnist_train, mnist_test, n_batches, task_labels=False, seed=1234
    )
    # ---------

    # MODEL CREATION
    model = SimpleMLP(num_classes=scenario.n_classes)

    # choose some metrics and evaluation method
    interactive_logger = InteractiveLogger()
    eval_plugin = EvaluationPlugin(
        accuracy_metrics(minibatch=True, epoch=True, experience=True, stream=True),
        loss_metrics(minibatch=True, epoch=True, experience=True, stream=True),
        forgetting_metrics(experience=True),
        loggers=[interactive_logger],
    )

    # CREATE THE STRATEGY INSTANCE (NAIVE)
    cl_strategy = Naive(
        model,
        torch.optim.Adam(model.parameters(), lr=0.001),
        CrossEntropyLoss(),
        train_mb_size=100,
        train_epochs=4,
        eval_mb_size=100,
        device=device,
        plugins=[ReplayPlugin(mem_size=10000)],
        evaluator=eval_plugin,
    )

    # TRAINING LOOP
    print("Starting experiment...")
    results = []
    for experience in scenario.train_stream:
        print("Start of experience ", experience.current_experience)
        cl_strategy.train(experience)
        print("Training completed")

        print("Computing accuracy on the whole test set")
        results.append(cl_strategy.eval(scenario.test_stream))