def main(args):
    # --- CONFIG
    device = torch.device(f"cuda:{args.cuda}"
                          if torch.cuda.is_available() and
                          args.cuda >= 0 else "cpu")
    n_batches = 5
    # ---------

    # --- TRANSFORMATIONS
    train_transform = transforms.Compose([
        RandomCrop(28, padding=4),
        ToTensor(),
        transforms.Normalize((0.1307,), (0.3081,))
    ])
    test_transform = transforms.Compose([
        ToTensor(),
        transforms.Normalize((0.1307,), (0.3081,))
    ])
    # ---------

    # --- SCENARIO CREATION
    mnist_train = MNIST('./data/mnist', train=True,
                        download=True, transform=train_transform)
    mnist_test = MNIST('./data/mnist', train=False,
                       download=True, transform=test_transform)
    scenario = nc_scenario(
        mnist_train, mnist_test, n_batches, task_labels=False, seed=1234)
    # ---------

    # MODEL CREATION
    model = SimpleMLP(num_classes=scenario.n_classes)

    # choose some metrics and evaluation method
    interactive_logger = InteractiveLogger()

    eval_plugin = EvaluationPlugin(
        accuracy_metrics(
            minibatch=True, epoch=True, experience=True, stream=True),
        loss_metrics(minibatch=True, epoch=True, experience=True, stream=True),
        ExperienceForgetting(),
        loggers=[interactive_logger])

    # CREATE THE STRATEGY INSTANCE (NAIVE)
    cl_strategy = Naive(model,
                        torch.optim.Adam(model.parameters(), lr=0.001),
                        CrossEntropyLoss(),
                        train_mb_size=100, train_epochs=4, eval_mb_size=100,
                        device=device,
                        plugins=[ReplayPlugin(mem_size=10000)],
                        evaluator=eval_plugin)

    # TRAINING LOOP
    print('Starting experiment...')
    results = []
    for experience in scenario.train_stream:
        print("Start of experience ", experience.current_experience)
        cl_strategy.train(experience)
        print('Training completed')

        print('Computing accuracy on the whole test set')
        results.append(cl_strategy.eval(scenario.test_stream))
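# A minimal entry point for this example; a sketch, assuming only the
# ``--cuda`` flag (the single attribute this script reads from ``args``).
# The examples that follow use the same pattern with extra flags.
if __name__ == '__main__':
    import argparse
    parser = argparse.ArgumentParser()
    parser.add_argument('--cuda', type=int, default=0,
                        help='Select zero-indexed cuda device. -1 to use CPU.')
    main(parser.parse_args())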
def main(args):
    """
    Last Avalanche version reference performance (online):
    Top1_Acc_Stream/eval_phase/test_stream = 0.9421
    """
    # --- DEFAULT PARAMS ONLINE DATA INCREMENTAL LEARNING
    nb_tasks = 5  # Can still design the data stream based on tasks
    epochs = 1  # All data is only seen once: Online
    batch_size = 10  # Only process small amount of data at a time
    return_task_id = False  # Data incremental (task-agnostic/task-free)
    # TODO use data_incremental_generator, now experience=task

    # --- CONFIG
    device = torch.device(
        f"cuda:{args.cuda}"
        if torch.cuda.is_available() and args.cuda >= 0 else "cpu")
    # ---------

    # --- SCENARIO CREATION
    scenario = SplitMNIST(nb_tasks, return_task_id=return_task_id,
                          fixed_class_order=[i for i in range(10)])
    # ---------

    # MODEL CREATION
    model = SimpleMLP(num_classes=args.featsize,
                      hidden_size=400, hidden_layers=2)

    # choose some metrics and evaluation method
    interactive_logger = InteractiveLogger()

    eval_plugin = EvaluationPlugin(
        accuracy_metrics(experience=True, stream=True),
        loss_metrics(experience=True, stream=True),
        ExperienceForgetting(),
        loggers=[interactive_logger])

    # CoPE PLUGIN
    cope = CoPEPlugin(mem_size=2000, p_size=args.featsize,
                      n_classes=scenario.n_classes)

    # CREATE THE STRATEGY INSTANCE (NAIVE) WITH CoPE PLUGIN
    cl_strategy = Naive(model, torch.optim.SGD(model.parameters(), lr=0.01),
                        cope.loss,  # CoPE PPP-Loss
                        train_mb_size=batch_size, train_epochs=epochs,
                        eval_mb_size=100, device=device,
                        plugins=[cope],
                        evaluator=eval_plugin)

    # TRAINING LOOP
    print('Starting experiment...')
    results = []
    for experience in scenario.train_stream:
        print("Start of experience ", experience.current_experience)
        cl_strategy.train(experience)
        print('Training completed')

        print('Computing accuracy on the whole test set')
        results.append(cl_strategy.eval(scenario.test_stream))
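# Entry-point sketch for the CoPE example: the same pattern as before,
# extended with the ``--featsize`` flag this script reads. ``--featsize``
# sets both the MLP output size and the CoPE prototype size ``p_size``;
# the default value here is an assumption, not a tuned setting.
if __name__ == '__main__':
    import argparse
    parser = argparse.ArgumentParser()
    parser.add_argument('--cuda', type=int, default=0,
                        help='Select zero-indexed cuda device. -1 to use CPU.')
    parser.add_argument('--featsize', type=int, default=32,
                        help='Prototype/embedding size (assumed default).')
    main(parser.parse_args())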
def main(args):
    model = SimpleMLP(hidden_size=args.hs)
    optimizer = torch.optim.SGD(model.parameters(), lr=args.lr)
    criterion = torch.nn.CrossEntropyLoss()

    # check if selected GPU is available or use CPU
    assert args.cuda == -1 or args.cuda >= 0, "cuda must be -1 or >= 0."
    device = torch.device(f"cuda:{args.cuda}"
                          if torch.cuda.is_available() and
                          args.cuda >= 0 else "cpu")
    print(f'Using device: {device}')

    # create scenario
    if args.scenario == 'pmnist':
        scenario = PermutedMNIST(n_experiences=args.permutations)
    elif args.scenario == 'smnist':
        scenario = SplitMNIST(n_experiences=5, return_task_id=False)
    else:
        raise ValueError("Wrong scenario name. Allowed pmnist, smnist.")

    # choose some metrics and evaluation method
    interactive_logger = InteractiveLogger()
    text_logger = TextLogger(open('log.txt', 'a'))

    eval_plugin = EvaluationPlugin(
        accuracy_metrics(
            minibatch=True, epoch=True, experience=True, stream=True),
        loss_metrics(minibatch=True, epoch=True, experience=True, stream=True),
        ExperienceForgetting(),
        loggers=[interactive_logger, text_logger])

    # create strategy
    strategy = EWC(model, optimizer, criterion, args.ewc_lambda,
                   args.ewc_mode, decay_factor=args.decay_factor,
                   train_epochs=args.epochs, device=device,
                   train_mb_size=args.minibatch_size, evaluator=eval_plugin)

    # train on the selected scenario with the chosen strategy
    print('Starting experiment...')
    results = []
    for experience in scenario.train_stream:
        print("Start training on experience ", experience.current_experience)
        strategy.train(experience)
        print("End training on experience", experience.current_experience)
        print('Computing accuracy on the test set')
        results.append(strategy.eval(scenario.test_stream[:]))
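# Entry-point sketch for the EWC example. Flag names mirror the attributes
# read above; the defaults are assumptions for illustration. ``--ewc_mode``
# chooses between one penalty per experience ('separate') and a single
# running penalty with decayed importances ('online').
if __name__ == '__main__':
    import argparse
    parser = argparse.ArgumentParser()
    parser.add_argument('--cuda', type=int, default=0)
    parser.add_argument('--hs', type=int, default=256, help='MLP hidden size.')
    parser.add_argument('--lr', type=float, default=0.001)
    parser.add_argument('--scenario', type=str, default='smnist',
                        choices=['pmnist', 'smnist'])
    parser.add_argument('--permutations', type=int, default=5,
                        help='Number of experiences for pmnist.')
    parser.add_argument('--ewc_lambda', type=float, default=0.4,
                        help='Strength of the EWC penalty.')
    parser.add_argument('--ewc_mode', type=str, default='separate',
                        choices=['separate', 'online'])
    parser.add_argument('--decay_factor', type=float, default=0.1,
                        help='Importance decay; only used in online mode.')
    parser.add_argument('--epochs', type=int, default=10)
    parser.add_argument('--minibatch_size', type=int, default=128)
    main(parser.parse_args())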
def main(args):
    model = SimpleMLP(hidden_size=args.hs)
    optimizer = torch.optim.SGD(model.parameters(), lr=args.lr)
    criterion = torch.nn.CrossEntropyLoss()

    # check if selected GPU is available or use CPU
    assert args.cuda == -1 or args.cuda >= 0, "cuda must be -1 or >= 0."
    device = torch.device(f"cuda:{args.cuda}"
                          if torch.cuda.is_available() and
                          args.cuda >= 0 else "cpu")
    print(f'Using device: {device}')

    # create split scenario
    scenario = SplitMNIST(n_experiences=5, return_task_id=False)

    interactive_logger = InteractiveLogger()
    eval_plugin = EvaluationPlugin(
        accuracy_metrics(
            minibatch=True, epoch=True, experience=True, stream=True),
        loss_metrics(minibatch=True, epoch=True, experience=True, stream=True),
        ExperienceForgetting(),
        loggers=[interactive_logger])

    # create strategy
    assert len(args.lwf_alpha) == 1 or len(args.lwf_alpha) == 5, \
        'lwf_alpha must contain a single value or one value per experience (5).'
    lwf_alpha = args.lwf_alpha[0] if len(args.lwf_alpha) == 1 \
        else args.lwf_alpha
    strategy = LwF(model, optimizer, criterion,
                   alpha=lwf_alpha, temperature=args.softmax_temperature,
                   train_epochs=args.epochs, device=device,
                   train_mb_size=args.minibatch_size, evaluator=eval_plugin)

    # train on the selected scenario with the chosen strategy
    print('Starting experiment...')
    results = []
    for train_batch_info in scenario.train_stream:
        print("Start training on experience ",
              train_batch_info.current_experience)
        strategy.train(train_batch_info, num_workers=4)
        print("End training on experience ",
              train_batch_info.current_experience)
        print('Computing accuracy on the test set')
        results.append(strategy.eval(scenario.test_stream[:]))
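# ``args.lwf_alpha`` must be a list (one value, or one per experience),
# which maps naturally to argparse's ``nargs='+'``. A sketch with assumed
# defaults:
if __name__ == '__main__':
    import argparse
    parser = argparse.ArgumentParser()
    parser.add_argument('--cuda', type=int, default=0)
    parser.add_argument('--hs', type=int, default=256)
    parser.add_argument('--lr', type=float, default=0.001)
    parser.add_argument('--lwf_alpha', nargs='+', type=float, default=[1.0],
                        help='One alpha, or one alpha per experience (5).')
    parser.add_argument('--softmax_temperature', type=float, default=2.0)
    parser.add_argument('--epochs', type=int, default=10)
    parser.add_argument('--minibatch_size', type=int, default=128)
    main(parser.parse_args())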
def main(args):
    # --- CONFIG
    device = torch.device(f"cuda:{args.cuda}"
                          if torch.cuda.is_available() and
                          args.cuda >= 0 else "cpu")
    # ---------

    # --- TRANSFORMATIONS
    train_transform = transforms.Compose([
        Resize(224),
        ToTensor(),
        transforms.Normalize((0.1307,), (0.3081,))
    ])
    test_transform = transforms.Compose([
        Resize(224),
        ToTensor(),
        transforms.Normalize((0.1307,), (0.3081,))
    ])
    # ---------

    # --- SCENARIO CREATION
    scenario = SplitCIFAR10(5, train_transform=train_transform,
                            eval_transform=test_transform)
    # ---------

    # MODEL CREATION
    model = MobilenetV1()
    adapt_classification_layer(model, scenario.n_classes, bias=False)

    # DEFINE THE EVALUATION PLUGIN AND LOGGER
    my_logger = TensorboardLogger(tb_log_dir="logs",
                                  tb_log_exp_name="logging_example")

    # print to stdout
    interactive_logger = InteractiveLogger()

    evaluation_plugin = EvaluationPlugin(
        accuracy_metrics(
            minibatch=True, epoch=True, experience=True, stream=True),
        loss_metrics(minibatch=True, epoch=True, experience=True, stream=True),
        ExperienceForgetting(),
        loggers=[my_logger, interactive_logger])

    # CREATE THE STRATEGY INSTANCE (NAIVE with the Synaptic Intelligence plugin)
    cl_strategy = SynapticIntelligence(
        model, Adam(model.parameters(), lr=0.001), CrossEntropyLoss(),
        si_lambda=0.0001, train_mb_size=128, train_epochs=4,
        eval_mb_size=128, device=device, evaluator=evaluation_plugin)

    # TRAINING LOOP
    print('Starting experiment...')
    results = []
    for experience in scenario.train_stream:
        print("Start of experience: ", experience.current_experience)
        print("Current Classes: ", experience.classes_in_this_experience)

        cl_strategy.train(experience)
        print('Training completed')

        print('Computing accuracy on the whole test set')
        results.append(cl_strategy.eval(scenario.test_stream))
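# ``adapt_classification_layer`` adjusts the MobilenetV1 head so its output
# matches the number of scenario classes. A hypothetical equivalent (a
# sketch, not Avalanche's actual implementation; the ``model.classifier``
# attribute is an assumption) would swap in a fresh linear layer:
import torch.nn as nn

def adapt_classification_layer_sketch(model, n_classes, bias=True):
    # assumes the backbone exposes its final layer as ``model.classifier``
    in_features = model.classifier.in_features
    model.classifier = nn.Linear(in_features, n_classes, bias=bias)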
# ``cur_time`` and ``model`` are defined earlier in the full script.

# log to Tensorboard
tb_logger = TensorboardLogger(f"./tb_data/{cur_time}-SimpleMLP/")

# log to text file
text_logger = TextLogger(open(f"./logs/{cur_time}-SimpleMLP.txt", "w+"))

# print to stdout
interactive_logger = InteractiveLogger()

eval_plugin = EvaluationPlugin(
    accuracy_metrics(minibatch=True, epoch=True, experience=True, stream=True),
    loss_metrics(minibatch=True, epoch=True, experience=True, stream=True),
    timing_metrics(epoch=True, epoch_running=True),
    ExperienceForgetting(),
    cpu_usage_metrics(experience=True),
    StreamConfusionMatrix(num_classes=2, save_image=False),
    disk_usage_metrics(minibatch=True, epoch=True, experience=True,
                       stream=True),
    loggers=[interactive_logger, text_logger, tb_logger],
)

cl_strategy = GEM(
    model,
    optimizer=Adam(model.parameters()),
    patterns_per_exp=1470,
    criterion=CrossEntropyLoss(),
    train_mb_size=128,
    train_epochs=50,
    eval_mb_size=128,
    evaluator=eval_plugin,
)
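# The fragment above omits its driver loop; presumably the script continues
# with the same pattern as the other examples in this file (a sketch,
# assuming a ``scenario`` benchmark built earlier in the full script):
print('Starting experiment...')
results = []
for experience in scenario.train_stream:
    print("Start of experience ", experience.current_experience)
    cl_strategy.train(experience)
    print('Training completed')
    results.append(cl_strategy.eval(scenario.test_stream))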
def main(args):
    # --- CONFIG
    device = torch.device(f"cuda:{args.cuda}"
                          if torch.cuda.is_available() and
                          args.cuda >= 0 else "cpu")
    # ---------

    # --- TRANSFORMATIONS
    train_transform = transforms.Compose([
        RandomCrop(28, padding=4),
        ToTensor(),
        transforms.Normalize((0.1307,), (0.3081,))
    ])
    test_transform = transforms.Compose(
        [ToTensor(), transforms.Normalize((0.1307,), (0.3081,))])
    # ---------

    # --- SCENARIO CREATION
    mnist_train = MNIST('./data/mnist', train=True,
                        download=True, transform=train_transform)
    mnist_test = MNIST('./data/mnist', train=False,
                       download=True, transform=test_transform)
    scenario = nc_scenario(mnist_train, mnist_test, 5,
                           task_labels=False, seed=1234)
    # ---------

    # MODEL CREATION
    model = SimpleMLP(num_classes=scenario.n_classes)

    # DEFINE THE EVALUATION PLUGIN AND LOGGER
    # The evaluation plugin manages the metrics computation.
    # It takes as argument a list of metrics and a list of loggers.
    # The evaluation plugin calls the loggers to serialize the metrics
    # and save them in persistent memory or print them to standard output.

    # log to text file
    text_logger = TextLogger(open('log.txt', 'a'))

    # print to stdout
    interactive_logger = InteractiveLogger()

    eval_plugin = EvaluationPlugin(
        accuracy_metrics(
            minibatch=True, epoch=True, experience=True, stream=True),
        loss_metrics(minibatch=True, epoch=True, experience=True, stream=True),
        cpu_usage_metrics(
            minibatch=True, epoch=True, experience=True, stream=True),
        timing_metrics(
            minibatch=True, epoch=True, experience=True, stream=True),
        ExperienceForgetting(),
        loggers=[interactive_logger, text_logger])

    # CREATE THE STRATEGY INSTANCE (NAIVE)
    cl_strategy = Naive(model, SGD(model.parameters(), lr=0.001, momentum=0.9),
                        CrossEntropyLoss(), train_mb_size=500, train_epochs=1,
                        eval_mb_size=100, device=device, evaluator=eval_plugin)

    # TRAINING LOOP
    print('Starting experiment...')
    results = []
    for experience in scenario.train_stream:
        print("Start of experience: ", experience.current_experience)
        print("Current Classes: ", experience.classes_in_this_experience)

        # train returns a list of dictionaries (one for each experience); each
        # dictionary stores the last value of each metric curve emitted
        # during training.
        res = cl_strategy.train(experience)
        print('Training completed')

        print('Computing accuracy on the whole test set')
        # eval also returns a dictionary
        results.append(cl_strategy.eval(scenario.test_stream))

    print(f"Test metrics:\n{results}")

    # All the metric curves (x,y values) are stored inside the evaluator
    # (this can be disabled). You can use this dictionary to manipulate the
    # metrics without Avalanche.
    all_metrics = cl_strategy.evaluator.all_metrics
    print(f"Stored metrics: {list(all_metrics.keys())}")
    mname = 'Top1_Acc_Task/Task000'
    print(f"{mname}: {cl_strategy.evaluator.all_metrics[mname]}")
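    # Continuing inside ``main``: each ``all_metrics`` entry holds the logged
    # x-values (steps) and y-values for one metric curve. A sketch of pulling
    # a curve out for processing outside Avalanche (assumes the entry is a
    # (steps, values) pair, matching what the print above shows):
    steps, values = all_metrics[mname]
    print(f"{mname} logged {len(values)} times; final value: {values[-1]}")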
def main():
    args = parser.parse_args()
    args.cuda = args.cuda == 'yes'
    args.disable_pbar = args.disable_pbar == 'yes'
    args.stable_sgd = args.stable_sgd == 'yes'
    print(f"args={vars(args)}")

    device = torch.device("cuda:0"
                          if torch.cuda.is_available() and args.cuda
                          else "cpu")
    print(f'Using device: {device}')

    # unique identifier
    uid = uuid.uuid4().hex if args.uid is None else args.uid
    now = str(datetime.datetime.now().date()) + "_" + \
        ':'.join(str(datetime.datetime.now().time()).split(':')[:-1])
    runname = 'T={}_id={}'.format(now, uid) if not args.resume else args.resume

    # Paths
    setupname = [args.strategy, args.exp_name, args.model, args.scenario]
    parentdir = os.path.join(args.save_path, '_'.join(setupname))
    results_path = Path(os.path.join(parentdir, runname))
    results_path.mkdir(parents=True, exist_ok=True)
    tb_log_dir = os.path.join(results_path, 'tb_run')  # Group all runs

    # Eval results
    eval_metric = 'Top1_Acc_Stream/eval_phase/test_stream'
    eval_results_dir = results_path / eval_metric.split('/')[0]
    eval_results_dir.mkdir(parents=True, exist_ok=True)

    eval_result_files = []  # To avg over seeds
    seeds = [args.seed] if args.seed is not None else list(range(args.n_seeds))
    for seed in seeds:
        # initialize seeds
        print("STARTING SEED {}/{}".format(seed, len(seeds) - 1))
        set_seed(seed)

        # create scenario
        if args.scenario == 'smnist':
            inputsize = 28 * 28
            scenario = SplitMNIST(n_experiences=5, return_task_id=False,
                                  seed=seed,
                                  fixed_class_order=[i for i in range(10)])
        elif args.scenario == 'CIFAR10':
            scenario = SplitCIFAR10(n_experiences=5, return_task_id=False,
                                    seed=seed,
                                    fixed_class_order=[i for i in range(10)])
            inputsize = (3, 32, 32)
        elif args.scenario == 'miniimgnet':
            scenario = SplitMiniImageNet(args.dset_rootpath, n_experiences=20,
                                         return_task_id=False, seed=seed,
                                         fixed_class_order=[i for i in
                                                            range(100)])
            inputsize = (3, 84, 84)
        else:
            raise ValueError("Wrong scenario name.")
        print(f"Scenario = {args.scenario}")

        if args.model == 'simple_mlp':
            model = MyMLP(input_size=inputsize, hidden_size=args.hs)
        elif args.model == 'resnet18':
            if not args.stable_sgd:
                assert args.drop_prob == 0
            model = ResNet18(inputsize, scenario.n_classes,
                             drop_prob=args.drop_prob)
        else:
            raise ValueError("Wrong model name.")

        criterion = torch.nn.CrossEntropyLoss()
        optimizer = torch.optim.SGD(model.parameters(), lr=args.lr)

        # Paths
        eval_results_file = eval_results_dir / f'seed={seed}.csv'

        # LOGGING
        # log to Tensorboard
        tb_logger = TensorboardLogger(tb_log_dir=tb_log_dir,
                                      tb_log_exp_name=f'seed={seed}.pt')
        # print to stdout
        print_logger = TextLogger() if args.disable_pbar \
            else InteractiveLogger()
        eval_logger = EvalTextLogger(metric_filter=eval_metric,
                                     file=open(eval_results_file, 'a'))
        eval_result_files.append(eval_results_file)

        # METRICS
        eval_plugin = EvaluationPlugin(
            accuracy_metrics(experience=True, stream=True),
            loss_metrics(minibatch=True, experience=True),
            ExperienceForgetting(),  # Test only
            StreamConfusionMatrix(num_classes=scenario.n_classes,
                                  save_image=True),
            # LOG OTHER STATS
            # timing_metrics(epoch=True, experience=False),
            # cpu_usage_metrics(experience=True),
            # DiskUsageMonitor(),
            # MinibatchMaxRAM(),
            # GpuUsageMonitor(0),
            loggers=[print_logger, tb_logger, eval_logger])

        plugins = None
        if args.strategy == 'replay':
            plugins = [RehRevPlugin(n_total_memories=args.mem_size,
                                    mode=args.replay_mode,  # STEP-BACK
                                    aversion_steps=args.aversion_steps,
                                    aversion_lr=args.aversion_lr,
                                    stable_sgd=args.stable_sgd,  # Stable SGD
                                    lr_decay=args.lr_decay,
                                    # First task epochs
                                    init_epochs=args.init_epochs)]

        # CREATE THE STRATEGY INSTANCE (NAIVE)
        strategy = Naive(model, optimizer, criterion,
                         train_epochs=args.epochs, device=device,
                         train_mb_size=args.bs, evaluator=eval_plugin,
                         plugins=plugins)

        # train on the selected scenario with the chosen strategy
        print('Starting experiment...')
        for experience in scenario.train_stream:
            if experience.current_experience == args.until_task:
                print("CUTTING OFF TRAINING AT TASK ",
                      experience.current_experience)
                break
            else:
                print("Start training on step ",
                      experience.current_experience)
                strategy.train(experience)
                print("End training on step ",
                      experience.current_experience)
                print('Computing accuracy on the test set')
                res = strategy.eval(scenario.test_stream[:args.until_task])

    # Gathered by EvalLogger
    final_results_file = eval_results_dir / 'seed_summary.pt'
    stat_summarize(eval_result_files, final_results_file)
    print(f"[FILE:TB-RESULTS]: {tb_log_dir}")
    print(f"[FILE:FINAL-RESULTS]: {final_results_file}")
    print("FINISHED SCRIPT")
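# ``set_seed`` is one of this script's unshown helpers; a typical
# implementation (a sketch, not necessarily the author's) seeds every
# relevant RNG for reproducibility:
import random
import numpy as np
import torch

def set_seed(seed):
    random.seed(seed)
    np.random.seed(seed)
    torch.manual_seed(seed)
    torch.cuda.manual_seed_all(seed)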
def main(args):
    # --- CONFIG
    device = torch.device(f"cuda:{args.cuda}"
                          if torch.cuda.is_available() and
                          args.cuda >= 0 else "cpu")
    # ---------

    # --- TRANSFORMATIONS
    train_transform = transforms.Compose([
        RandomCrop(28, padding=4),
        ToTensor(),
        transforms.Normalize((0.1307,), (0.3081,))
    ])
    test_transform = transforms.Compose(
        [ToTensor(), transforms.Normalize((0.1307,), (0.3081,))])
    # ---------

    # --- SCENARIO CREATION
    mnist_train = MNIST('./data/mnist', train=True,
                        download=True, transform=train_transform)
    mnist_test = MNIST('./data/mnist', train=False,
                       download=True, transform=test_transform)
    scenario = nc_scenario(mnist_train, mnist_test, 5,
                           task_labels=False, seed=1234)
    # ---------

    # MODEL CREATION
    model = SimpleMLP(num_classes=scenario.n_classes)

    interactive_logger = InteractiveLogger()
    wandb_logger = WandBLogger(init_kwargs={"project": args.project,
                                            "name": args.run})

    eval_plugin = EvaluationPlugin(
        accuracy_metrics(minibatch=True, epoch=True, epoch_running=True,
                         experience=True, stream=True),
        loss_metrics(minibatch=True, epoch=True, epoch_running=True,
                     experience=True, stream=True),
        ExperienceForgetting(),
        StreamConfusionMatrix(),
        cpu_usage_metrics(minibatch=True, epoch=True,
                          experience=True, stream=True),
        timing_metrics(minibatch=True, epoch=True,
                       experience=True, stream=True),
        ram_usage_metrics(every=0.5, minibatch=True, epoch=True,
                          experience=True, stream=True),
        gpu_usage_metrics(args.cuda, every=0.5, minibatch=True, epoch=True,
                          experience=True, stream=True),
        disk_usage_metrics(minibatch=True, epoch=True,
                           experience=True, stream=True),
        MAC_metrics(minibatch=True, epoch=True, experience=True),
        loggers=[interactive_logger, wandb_logger])

    # CREATE THE STRATEGY INSTANCE (NAIVE)
    cl_strategy = Naive(model, SGD(model.parameters(), lr=0.001, momentum=0.9),
                        CrossEntropyLoss(), train_mb_size=100, train_epochs=4,
                        eval_mb_size=100, device=device, evaluator=eval_plugin)

    # TRAINING LOOP
    print('Starting experiment...')
    results = []
    for experience in scenario.train_stream:
        print("Start of experience: ", experience.current_experience)
        print("Current Classes: ", experience.classes_in_this_experience)

        cl_strategy.train(experience)
        print('Training completed')

        print('Computing accuracy on the whole test set')
        results.append(cl_strategy.eval(scenario.test_stream))
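# Entry-point sketch for the W&B example: ``--project`` and ``--run`` feed
# WandBLogger's ``init_kwargs`` (the wandb package must be installed and the
# user logged in). The default names below are assumptions for illustration.
if __name__ == '__main__':
    import argparse
    parser = argparse.ArgumentParser()
    parser.add_argument('--cuda', type=int, default=0,
                        help='Select zero-indexed cuda device. -1 to use CPU.')
    parser.add_argument('--project', type=str, default='avalanche-example',
                        help='W&B project name (assumed default).')
    parser.add_argument('--run', type=str, default='naive-mnist',
                        help='W&B run name (assumed default).')
    main(parser.parse_args())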