def test_gdumb(self):
    # SIT scenario
    model, optimizer, criterion, my_nc_benchmark = self.init_sit()
    strategy = GDumb(
        model,
        optimizer,
        criterion,
        mem_size=200,
        train_mb_size=64,
        device=self.device,
        eval_mb_size=50,
        train_epochs=2,
    )
    self.run_strategy(my_nc_benchmark, strategy)

    # MT scenario
    strategy = GDumb(
        model,
        optimizer,
        criterion,
        mem_size=200,
        train_mb_size=64,
        device=self.device,
        eval_mb_size=50,
        train_epochs=2,
    )
    benchmark = self.load_benchmark(use_task_labels=True)
    self.run_strategy(benchmark, strategy)
def test_gdumb(self):
    model = self.get_model(fast_test=self.fast_test)
    optimizer = SGD(model.parameters(), lr=1e-3)
    criterion = CrossEntropyLoss()

    # SIT scenario
    my_nc_scenario = self.load_scenario(fast_test=self.fast_test)
    strategy = GDumb(
        model,
        optimizer,
        criterion,
        mem_size=200,
        train_mb_size=64,
        device=self.device,
        eval_mb_size=50,
        train_epochs=2,
    )
    self.run_strategy(my_nc_scenario, strategy)

    # MT scenario
    strategy = GDumb(
        model,
        optimizer,
        criterion,
        mem_size=200,
        train_mb_size=64,
        device=self.device,
        eval_mb_size=50,
        train_epochs=2,
    )
    scenario = self.load_scenario(fast_test=self.fast_test, use_task_labels=True)
    self.run_strategy(scenario, strategy)
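# --- A minimal standalone sketch (not part of the tests above) of the same
# GDumb configuration run end to end. SplitMNIST and SimpleMLP are assumed
# stand-ins for the benchmark/model the test harness provides; the import
# path avalanche.training.supervised applies to recent Avalanche releases
# (older releases exposed the strategies under avalanche.training.strategies).
from torch.nn import CrossEntropyLoss
from torch.optim import SGD

from avalanche.benchmarks.classic import SplitMNIST
from avalanche.models import SimpleMLP
from avalanche.training.supervised import GDumb

benchmark = SplitMNIST(n_experiences=5)
model = SimpleMLP(num_classes=benchmark.n_classes)
strategy = GDumb(
    model,
    SGD(model.parameters(), lr=1e-3),
    CrossEntropyLoss(),
    mem_size=200,       # class-balanced buffer; GDumb retrains on it from scratch
    train_mb_size=64,
    train_epochs=2,
    eval_mb_size=50,
)
for experience in benchmark.train_stream:
    strategy.train(experience)
    strategy.eval(benchmark.test_stream)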
elif args.cl_strategy == "AR1":
    # AR1 in Avalanche builds and optimizes its own MobileNet-based model
    # internally, so it takes no model/optimizer arguments (passing them
    # positionally, as the original did, silently fills criterion/lr).
    cl_strategy = AR1(
        criterion=CrossEntropyLoss(),
        lr=0.001,
        ewc_lambda=0.5,
        train_mb_size=args.batch_size,
        train_epochs=args.num_epochs,
        eval_mb_size=args.batch_size * 2,
        evaluator=eval_plugin,
        device=device,
    )
elif args.cl_strategy == "GDumb":
    cl_strategy = GDumb(
        model,
        Adam(model.parameters(), lr=0.001),
        CrossEntropyLoss(),
        mem_size=200,
        train_mb_size=args.batch_size,
        train_epochs=args.num_epochs,
        eval_mb_size=args.batch_size * 2,
        evaluator=eval_plugin,
        device=device,
    )
else:
    print("Strategy is not implemented!")
    raise NotImplementedError

# TRAINING LOOP
print('Starting experiment...')
results = []
for experience in scenario.train_stream:
    curr_experience = experience.current_experience
    print("Start of experience: ", curr_experience)
    print("Current Classes: ", experience.classes_in_this_experience)
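    # A minimal sketch of how this loop typically continues in Avalanche
    # example scripts; the eval call on scenario.test_stream is an assumption
    # here, not part of the original snippet.
    cl_strategy.train(experience)
    print("Training completed")
    results.append(cl_strategy.eval(scenario.test_stream))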