Example #1
0
def main(cuda: int):
    """Train a Naive strategy with a replay buffer on Split CIFAR-10,
    evaluating after each experience on the test experiences seen so far.

    :param cuda: GPU index to use; pass a negative value to force the CPU.
    """
    # --- CONFIG
    # Fall back to the CPU both when CUDA is unavailable and when the caller
    # explicitly requested it with a negative index; without the `cuda >= 0`
    # guard, cuda=-1 would build the invalid device string "cuda:-1".
    device = torch.device(
        f"cuda:{cuda}" if torch.cuda.is_available() and cuda >= 0 else "cpu"
    )
    # --- SCENARIO CREATION
    scenario = SplitCIFAR10(n_experiences=2, seed=42)
    # ---------

    # MODEL CREATION
    # CIFAR-10 inputs are 3x32x32; spell the flattened size out instead of
    # the equivalent but opaque `196608 // 64`.
    model = SimpleMLP(num_classes=scenario.n_classes, input_size=3 * 32 * 32)

    # choose some metrics and evaluation method
    eval_plugin = EvaluationPlugin(
        accuracy_metrics(stream=True, experience=True),
        images_samples_metrics(
            on_train=True,
            on_eval=True,
            n_cols=10,
            n_rows=10,
        ),
        labels_repartition_metrics(
            # image_creator=repartition_bar_chart_image_creator,
            on_train=True,
            on_eval=True,
        ),
        loggers=[
            TensorboardLogger(f"tb_data/{datetime.now()}"),
            InteractiveLogger(),
        ],
    )

    # CREATE THE STRATEGY INSTANCE (NAIVE)
    cl_strategy = Naive(
        model,
        Adam(model.parameters()),
        train_mb_size=128,
        train_epochs=1,
        eval_mb_size=128,
        device=device,
        plugins=[ReplayPlugin(mem_size=1_000)],
        evaluator=eval_plugin,
    )

    # TRAINING LOOP
    # After training on experience i-1, evaluate on test experiences 0..i-1
    # so forgetting on earlier experiences is visible.
    for i, experience in enumerate(scenario.train_stream, 1):
        cl_strategy.train(experience)
        cl_strategy.eval(scenario.test_stream[:i])
Example #2
0
def main(args):
    """Run a Naive strategy on split MNIST (5 experiences) with a full
    battery of evaluation metrics logged to stdout and Tensorboard."""
    # --- CONFIG
    use_gpu = torch.cuda.is_available() and args.cuda >= 0
    device = torch.device(f"cuda:{args.cuda}" if use_gpu else "cpu")
    # ---------

    # --- TRANSFORMATIONS
    normalize = transforms.Normalize((0.1307, ), (0.3081, ))
    train_transform = transforms.Compose(
        [RandomCrop(28, padding=4), ToTensor(), normalize])
    test_transform = transforms.Compose([ToTensor(), normalize])
    # ---------

    # --- SCENARIO CREATION
    data_root = expanduser("~") + "/.avalanche/data/mnist/"
    mnist_train = MNIST(root=data_root, train=True, download=True,
                        transform=train_transform)
    mnist_test = MNIST(root=data_root, train=False, download=True,
                       transform=test_transform)
    scenario = nc_benchmark(
        mnist_train, mnist_test, 5, task_labels=False, seed=1234)
    # ---------

    # MODEL CREATION
    model = SimpleMLP(num_classes=scenario.n_classes)

    # choose metrics and where to log them
    loggers = [InteractiveLogger(), TensorboardLogger()]

    eval_plugin = EvaluationPlugin(
        accuracy_metrics(
            minibatch=True, epoch=True, epoch_running=True,
            experience=True, stream=True),
        loss_metrics(
            minibatch=True, epoch=True, epoch_running=True,
            experience=True, stream=True),
        forgetting_metrics(experience=True, stream=True),
        StreamConfusionMatrix(),
        cpu_usage_metrics(
            minibatch=True, epoch=True, experience=True, stream=True),
        timing_metrics(
            minibatch=True, epoch=True, experience=True, stream=True),
        ram_usage_metrics(
            every=0.5, minibatch=True, epoch=True,
            experience=True, stream=True),
        gpu_usage_metrics(
            args.cuda, every=0.5, minibatch=True, epoch=True,
            experience=True, stream=True),
        disk_usage_metrics(
            minibatch=True, epoch=True, experience=True, stream=True),
        MAC_metrics(minibatch=True, epoch=True, experience=True),
        loggers=loggers)

    # CREATE THE STRATEGY INSTANCE (NAIVE)
    cl_strategy = Naive(
        model,
        SGD(model.parameters(), lr=0.001, momentum=0.9),
        CrossEntropyLoss(),
        train_mb_size=100,
        train_epochs=4,
        eval_mb_size=100,
        device=device,
        evaluator=eval_plugin)

    # TRAINING LOOP
    print('Starting experiment...')
    results = []
    for experience in scenario.train_stream:
        print("Start of experience: ", experience.current_experience)
        print("Current Classes: ", experience.classes_in_this_experience)

        cl_strategy.train(experience)
        print('Training completed')

        print('Computing accuracy on the whole test set')
        results.append(cl_strategy.eval(scenario.test_stream))
Example #3
0
def main(args):
    """Train an EWC strategy on permuted or split MNIST and evaluate it
    on the full test stream after each experience."""
    model = SimpleMLP(hidden_size=args.hs)
    optimizer = torch.optim.SGD(model.parameters(), lr=args.lr)
    criterion = torch.nn.CrossEntropyLoss()

    # check if selected GPU is available or use CPU
    assert args.cuda == -1 or args.cuda >= 0, "cuda must be -1 or >= 0."
    use_gpu = torch.cuda.is_available() and args.cuda >= 0
    device = torch.device(f"cuda:{args.cuda}" if use_gpu else "cpu")
    print(f"Using device: {device}")

    # create scenario
    if args.scenario == "pmnist":
        scenario = PermutedMNIST(n_experiences=args.permutations)
    elif args.scenario == "smnist":
        data_root = expanduser("~") + "/.avalanche/data/mnist/"
        mnist_train = MNIST(root=data_root, train=True, download=True,
                            transform=ToTensor())
        mnist_test = MNIST(root=data_root, train=False, download=True,
                            transform=ToTensor())
        scenario = nc_benchmark(
            mnist_train, mnist_test, 5, task_labels=False, seed=1234)
    else:
        raise ValueError("Wrong scenario name. Allowed pmnist, smnist.")

    # choose some metrics and evaluation method
    eval_plugin = EvaluationPlugin(
        accuracy_metrics(
            minibatch=True, epoch=True, experience=True, stream=True),
        loss_metrics(minibatch=True, epoch=True, experience=True, stream=True),
        forgetting_metrics(experience=True, stream=True),
        bwt_metrics(experience=True, stream=True),
        loggers=[InteractiveLogger(), TensorboardLogger()],
    )

    # create strategy
    strategy = EWC(
        model,
        optimizer,
        criterion,
        args.ewc_lambda,
        args.ewc_mode,
        decay_factor=args.decay_factor,
        train_epochs=args.epochs,
        device=device,
        train_mb_size=args.minibatch_size,
        evaluator=eval_plugin,
    )

    # train on the selected scenario with the chosen strategy
    print("Starting experiment...")
    results = []
    for experience in scenario.train_stream:
        print("Start training on experience ", experience.current_experience)

        strategy.train(experience)
        print("End training on experience", experience.current_experience)
        print("Computing accuracy on the test set")
        results.append(strategy.eval(scenario.test_stream[:]))
Example #4
0
# Use a single, fixed task ordering (the permutation computed above).
task_order_list = [perm]

# task_ordering() presumably returns (train_x, train_y, test_x, test_y)
# for the given ordering — TODO confirm against its definition.
dataset = task_ordering(task_order_list[0])

generic_scenario = tensor_scenario(
    train_data_x=dataset[0],
    train_data_y=dataset[1],
    test_data_x=dataset[2],
    test_data_y=dataset[3],
    task_labels=[
        0 for key in task_order_list[0].keys()
    ],  # shouldn't provide task ID for inference
)

# log to Tensorboard
tb_logger = TensorboardLogger(f"./tb_data/{cur_time}-SimpleMLP/")

# log to text file
text_logger = TextLogger(open(f"./logs/{cur_time}-SimpleMLP.txt", "w+"))

# print to stdout
interactive_logger = InteractiveLogger()

eval_plugin = EvaluationPlugin(
    accuracy_metrics(minibatch=True, epoch=True, experience=True, stream=True),
    loss_metrics(minibatch=True, epoch=True, experience=True, stream=True),
    timing_metrics(epoch=True, epoch_running=True),
    ExperienceForgetting(),
    cpu_usage_metrics(experience=True),
    StreamConfusionMatrix(num_classes=2, save_image=False),
    disk_usage_metrics(minibatch=True, epoch=True, experience=True, stream=True),
Example #5
0
# Train once per candidate task ordering.
for task_order in range(len(task_order_list)):
    print("Current task order processing : ", task_order + 1)
    dataset = task_ordering(task_order_list[task_order])

    generic_scenario = tensor_scenario(
        train_data_x=dataset[0],
        train_data_y=dataset[1],
        test_data_x=dataset[2],
        test_data_y=dataset[3],
        # BUG FIX: the original indexed with `i`, which is not defined by
        # this loop (the loop variable is `task_order`; the parallel example
        # using the same code indexes with `task_order`).
        task_labels=[0 for key in task_order_list[task_order].keys()
                     ],  # shouldn't provide task ID for inference
    )

    # log to Tensorboard (one run directory per task ordering)
    tb_logger = TensorboardLogger(
        f"./tb_data/{cur_time}_CNN1D_0inTask{task_order}/")

    # log to text file (creates/truncates the per-ordering log file)
    text_logger = TextLogger(
        open(f"./logs/{cur_time}_CNN1D_0inTask{task_order}.txt", "w+"))

    # print to stdout
    interactive_logger = InteractiveLogger()

    eval_plugin = EvaluationPlugin(
        accuracy_metrics(minibatch=True,
                         epoch=True,
                         experience=True,
                         stream=True),
        loss_metrics(minibatch=True, epoch=True, experience=True, stream=True),
        timing_metrics(epoch=True, epoch_running=True),
Example #6
0
 def test_tensorboard_logger(self):
     """Run the shared logger checks against a TensorboardLogger."""
     tb_logger = TensorboardLogger(self.logdir)
     self._test_logger(tb_logger)
# Train once per candidate task ordering.
for task_order in range(len(task_order_list)):
    print("Current task order processing ", task_order + 1)
    # task_ordering() presumably returns (train_x, train_y, test_x, test_y)
    # for the given ordering — TODO confirm against its definition.
    dataset = task_ordering(task_order_list[task_order])

    generic_scenario = tensor_scenario(
        train_data_x=dataset[0],
        train_data_y=dataset[1],
        test_data_x=dataset[2],
        test_data_y=dataset[3],
        task_labels=[0 for key in task_order_list[task_order].keys()
                     ],  # shouldn't provide task ID for inference
    )

    # log to Tensorboard (one run directory per task ordering)
    tb_logger = TensorboardLogger(
        f"./tb_data/{cur_time}_CNN2D_ClassInc_0_in_task{task_order+1}/")

    # log to text file (creates/truncates the per-ordering log file)
    text_logger = TextLogger(
        open(f"./logs/{cur_time}_CNN2D_ClassInc_0_in_task{task_order+1}.txt",
             "w+"))

    # print to stdout
    interactive_logger = InteractiveLogger()

    eval_plugin = EvaluationPlugin(
        accuracy_metrics(minibatch=True,
                         epoch=True,
                         experience=True,
                         stream=True),
        loss_metrics(minibatch=True, epoch=True, experience=True, stream=True),
Example #8
0
def main():
    """Entry point: parse CLI arguments, run the chosen continual-learning
    strategy over one or more seeds, log per-seed evaluation results to CSV
    and Tensorboard, and write a cross-seed summary at the end.
    """
    args = parser.parse_args()
    # These flags arrive as 'yes'/'no' strings; convert them to booleans.
    args.cuda = args.cuda == 'yes'
    args.disable_pbar = args.disable_pbar == 'yes'
    args.stable_sgd = args.stable_sgd == 'yes'
    print(f"args={vars(args)}")

    # Always GPU 0 when CUDA is requested and available.
    device = torch.device("cuda:0" if torch.cuda.is_available() and args.cuda else "cpu")
    print(f'Using device: {device}')

    # unique identifier
    uid = uuid.uuid4().hex if args.uid is None else args.uid
    # Timestamp like "YYYY-MM-DD_HH:MM" (seconds dropped).
    now = str(datetime.datetime.now().date()) + "_" + ':'.join(str(datetime.datetime.now().time()).split(':')[:-1])
    # When resuming, reuse the previous run name instead of a fresh one.
    runname = 'T={}_id={}'.format(now, uid) if not args.resume else args.resume

    # Paths: <save_path>/<strategy_exp_model_scenario>/<runname>/
    setupname = [args.strategy, args.exp_name, args.model, args.scenario]
    parentdir = os.path.join(args.save_path, '_'.join(setupname))
    results_path = Path(os.path.join(parentdir, runname))
    results_path.mkdir(parents=True, exist_ok=True)
    tb_log_dir = os.path.join(results_path, 'tb_run')  # Group all runs

    # Eval results: one subdirectory named after the metric's first component.
    eval_metric = 'Top1_Acc_Stream/eval_phase/test_stream'
    eval_results_dir = results_path / eval_metric.split('/')[0]
    eval_results_dir.mkdir(parents=True, exist_ok=True)

    eval_result_files = []  # To avg over seeds
    # Run a single given seed, or n_seeds consecutive seeds starting at 0.
    seeds = [args.seed] if args.seed is not None else list(range(args.n_seeds))
    for seed in seeds:
        # initialize seeds
        print("STARTING SEED {}/{}".format(seed, len(seeds) - 1))

        set_seed(seed)

        # create scenario (inputsize is later consumed by the model builders)
        if args.scenario == 'smnist':
            inputsize = 28 * 28
            scenario = SplitMNIST(n_experiences=5, return_task_id=False, seed=seed,
                                  fixed_class_order=[i for i in range(10)])
        elif args.scenario == 'CIFAR10':
            scenario = SplitCIFAR10(n_experiences=5, return_task_id=False, seed=seed,
                                    fixed_class_order=[i for i in range(10)])
            inputsize = (3, 32, 32)
        elif args.scenario == 'miniimgnet':
            scenario = SplitMiniImageNet(args.dset_rootpath, n_experiences=20, return_task_id=False, seed=seed,
                                         fixed_class_order=[i for i in range(100)])
            inputsize = (3, 84, 84)
        else:
            raise ValueError("Wrong scenario name.")
        print(f"Scenario = {args.scenario}")

        # NOTE(review): no else-branch here — an unrecognized args.model
        # leaves `model` unbound and fails below with a NameError.
        if args.model == 'simple_mlp':
            model = MyMLP(input_size=inputsize, hidden_size=args.hs)
        elif args.model == 'resnet18':
            # Dropout is only meaningful together with stable SGD here.
            if not args.stable_sgd:
                assert args.drop_prob == 0
            model = ResNet18(inputsize, scenario.n_classes, drop_prob=args.drop_prob)

        criterion = torch.nn.CrossEntropyLoss()
        optimizer = torch.optim.SGD(model.parameters(), lr=args.lr)

        # Paths: one CSV of eval results per seed.
        eval_results_file = eval_results_dir / f'seed={seed}.csv'

        # LOGGING
        tb_logger = TensorboardLogger(tb_log_dir=tb_log_dir, tb_log_exp_name=f'seed={seed}.pt')  # log to Tensorboard
        print_logger = TextLogger() if args.disable_pbar else InteractiveLogger()  # print to stdout
        # Appends to the CSV so a resumed run keeps earlier rows.
        eval_logger = EvalTextLogger(metric_filter=eval_metric, file=open(eval_results_file, 'a'))
        eval_result_files.append(eval_results_file)

        # METRICS
        eval_plugin = EvaluationPlugin(
            accuracy_metrics(experience=True, stream=True),
            loss_metrics(minibatch=True, experience=True),
            ExperienceForgetting(),  # Test only
            StreamConfusionMatrix(num_classes=scenario.n_classes, save_image=True),

            # LOG OTHER STATS
            # timing_metrics(epoch=True, experience=False),
            # cpu_usage_metrics(experience=True),
            # DiskUsageMonitor(),
            # MinibatchMaxRAM(),
            # GpuUsageMonitor(0),
            loggers=[print_logger, tb_logger, eval_logger])

        # Only the replay strategy needs an extra plugin; Naive runs bare.
        plugins = None
        if args.strategy == 'replay':
            plugins = [RehRevPlugin(n_total_memories=args.mem_size,
                                    mode=args.replay_mode,  # STEP-BACK
                                    aversion_steps=args.aversion_steps,
                                    aversion_lr=args.aversion_lr,
                                    stable_sgd=args.stable_sgd,  # Stable SGD
                                    lr_decay=args.lr_decay,
                                    init_epochs=args.init_epochs  # First task epochs
                                    )]

        # CREATE THE STRATEGY INSTANCE (NAIVE)
        strategy = Naive(model, optimizer, criterion,
                         train_epochs=args.epochs, device=device,
                         train_mb_size=args.bs, evaluator=eval_plugin,
                         plugins=plugins
                         )

        # train on the selected scenario with the chosen strategy
        print('Starting experiment...')
        for experience in scenario.train_stream:
            # Stop before training on the cutoff task, if one was given.
            if experience.current_experience == args.until_task:
                print("CUTTING OF TRAINING AT TASK ", experience.current_experience)
                break
            else:
                print("Start training on step ", experience.current_experience)

            strategy.train(experience)
            print("End training on step ", experience.current_experience)
            print('Computing accuracy on the test set')
            res = strategy.eval(scenario.test_stream[:args.until_task])  # Gathered by EvalLogger

    # Summarize the per-seed CSVs into a single file and report locations.
    final_results_file = eval_results_dir / f'seed_summary.pt'
    stat_summarize(eval_result_files, final_results_file)
    print(f"[FILE:TB-RESULTS]: {tb_log_dir}")
    print(f"[FILE:FINAL-RESULTS]: {final_results_file}")
    print("FINISHED SCRIPT")
Example #9
0
    transforms.Resize(224),
    transforms.RandomCrop(224),
    transforms.ToTensor(),
    normalize,
])
# Evaluation pipeline: deterministic center crop, no random augmentation.
test_transform = transforms.Compose([
    transforms.Resize(224),
    transforms.CenterCrop(224),
    transforms.ToTensor(),
    normalize,
])

# All log output goes to the parent directory.
root = ".."

# log to Tensorboard
tb_logger = TensorboardLogger(root)

# log to text file (mode "w": overwritten on every run)
text_logger = TextLogger(open(f"{root}/log.txt", "w"))

# print to stdout
interactive_logger = InteractiveLogger()

eval_plugin = EvaluationPlugin(
    accuracy_metrics(minibatch=True, epoch=True, experience=True, stream=True),
    loss_metrics(minibatch=True, epoch=True, experience=True, stream=True),
    timing_metrics(epoch=True, epoch_running=True),
    forgetting_metrics(experience=True, stream=True),
    cpu_usage_metrics(experience=True),
    confusion_matrix_metrics(num_classes=NUM_CLASSES[data_name],
                             save_image=False,
Example #10
0
# Build a benchmark directly from the pre-made train/test dataset lists.
scenario_custom_task_labels = dataset_benchmark(training_datasets,
                                                test_datasets)

scenario = scenario_custom_task_labels

# Pick a model: incremental classifier head vs. fixed-size head.
if (args.grow_classifier):
    print("using incremental classifier")
    model = DCNNNoVAEIncremental(num_classes=2)
else:
    model = DCNNNoVAE(num_classes=5)  # 5 because I happen to know this

# log to Tensorboard
# Run directory: <tb_log_dir>/<timestamp-without-spaces>_<strategy-name>
path = args.tb_log_dir + "/" + ("".join(str(
    datetime.now()).split(" "))) + "_" + args.cl_strategy
tb_logger = TensorboardLogger(tb_log_dir=path)
# log to text file (mode 'a': appended across runs)
text_logger = TextLogger(open('log.txt', 'a'))
# print to stdout
interactive_logger = InteractiveLogger()
eval_plugin = EvaluationPlugin(
    accuracy_metrics(minibatch=False,
                     epoch=False,
                     experience=True,
                     stream=True),
    #loss_metrics(minibatch=True, epoch=True, experience=True, stream=True),
    #timing_metrics(epoch=True),
    #cpu_usage_metrics(experience=True),
    #forgetting_metrics(experience=True, stream=True),
    #StreamConfusionMatrix(num_classes=5, save_image=False),
    StreamConfusionMatrix(save_image=False),
Example #11
0
    # task_ordering() presumably returns (train_x, train_y, test_x, test_y)
    # for the given ordering — TODO confirm against its definition.
    dataset = task_ordering(task_order_list[task_order])
    # Debug output: per-split tensor shapes and the target element type.
    print('shape debug: ',dataset[0][0].shape,dataset[1][0].shape,dataset[2][0].shape,dataset[3][0].shape)
    print('shape of target: ',type(dataset[1][0][0]),dataset[1][0][0])
    generic_scenario = tensor_scenario(
        train_data_x=dataset[0],
        train_data_y=dataset[1],
        test_data_x=dataset[2],
        test_data_y=dataset[3],
        task_labels=[
            0 for key in task_order_list[task_order].keys()
        ],  # shouldn't provide task ID for inference
    )
    # log to Tensorboard (one run directory per task ordering)

    tb_logger = TensorboardLogger(
        f"./tb_data/{cur_time}_simple_mlp_task_0_in_task_{task_order}/"
    )

    # log to text file (creates/truncates the per-ordering log file)
    text_logger = TextLogger(
        open(
            f"./logs/{cur_time}_simple_mlp_task_0_in_task{task_order}.txt", "w+")
    )

    # print to stdout
    interactive_logger = InteractiveLogger()

    eval_plugin = EvaluationPlugin(
        accuracy_metrics(minibatch=True, epoch=True,
                         experience=True, stream=True),
        loss_metrics(minibatch=True, epoch=True, experience=True, stream=True),
Example #12
0
# task_ordering() presumably returns (train_x, train_y, test_x, test_y)
# for the given permutation — TODO confirm against its definition.
dataset = task_ordering(perm)

generic_scenario = tensor_scenario(
    train_data_x=dataset[0],
    train_data_y=dataset[1],
    test_data_x=dataset[2],
    test_data_y=dataset[3],
    task_labels=[0 for key in perm.keys()],  # no task IDs at inference
)

# Model Creation
# Always uses GPU 0 when CUDA is available.
device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")
model = SimpleMLP(num_classes=2, input_size=70, hidden_size=100)

# log to Tensorboard
tb_logger = TensorboardLogger(f"./tb_data/{cur_time}-simpleMLP_Domain/")

# log to text file (creates/truncates the log file)
text_logger = TextLogger(open(f"./logs/{cur_time}-simpleMLP_Domain.txt", "w+"))

# print to stdout
interactive_logger = InteractiveLogger()

eval_plugin = EvaluationPlugin(
    accuracy_metrics(minibatch=True, epoch=True, experience=True, stream=True),
    loss_metrics(minibatch=True, epoch=True, experience=True, stream=True),
    timing_metrics(epoch=True, epoch_running=True),
    ExperienceForgetting(),
    cpu_usage_metrics(experience=True),
    StreamConfusionMatrix(num_classes=2, save_image=False),
    disk_usage_metrics(minibatch=True, epoch=True, experience=True, stream=True),
Example #13
0
# Train once per candidate task ordering.
for task_order in range(len(task_order_list)):
    print("Current task order processing : ", task_order + 1)
    # task_ordering() presumably returns (train_x, train_y, test_x, test_y)
    # for the given ordering — TODO confirm against its definition.
    dataset = task_ordering(task_order_list[task_order])

    generic_scenario = tensor_scenario(
        train_data_x=dataset[0],
        train_data_y=dataset[1],
        test_data_x=dataset[2],
        test_data_y=dataset[3],
        task_labels=[
            0 for key in task_order_list[task_order].keys()
        ],  # shouldn't provide task ID for inference
    )

    # log to Tensorboard (one run directory per task ordering)
    tb_logger = TensorboardLogger(f"./tb_data/{cur_time}_CNN2D_0task_{task_order+1}/")

    # log to text file (creates/truncates the per-ordering log file)
    text_logger = TextLogger(
        open(f"./logs/{cur_time}_CNN2D_0task_{task_order+1}.txt", "w+")
    )

    # print to stdout
    interactive_logger = InteractiveLogger()

    eval_plugin = EvaluationPlugin(
        accuracy_metrics(minibatch=True, epoch=True, experience=True, stream=True),
        loss_metrics(minibatch=True, epoch=True, experience=True, stream=True),
        timing_metrics(epoch=True, epoch_running=True),
        ExperienceForgetting(),
        cpu_usage_metrics(experience=True),