).to(device)

if times:
    # OBSOLETE: DOES NOT WORK ANYMORE, WILL CAUSE CRASHES. TODO
    # Overrides default mode!
    # Tests different values of workers and batch sizes to check which is the fastest.
    num_workers = [16, 32, 64, 128]
    batch_sizes = [16, 32]
    perfs = []
    for nw, bs in product(num_workers, batch_sizes):
        tl, vl, _ = create_loaders(
            data_path,
            train_size,
            bs,
            max_subj,
            ch_type,
            data_type,
            num_workers=nw,
            debug=debug,
            chunkload=chunkload,
            permute_labels=permute_labels,
        )
        tpb, et = train(net, tl, vl, "", lr=learning_rate, timing=True)
        perfs.append((nw, bs, tpb, et))
    for x in sorted(perfs, key=lambda x: x[-1]):
        logging.info(f"\n{x[0]} {x[1]} {nt(x[2])} {nt(x[3])}")
else:
    # We create loaders and datasets (see dataloaders.py)
    trainloader, validloader, testloader = create_loaders(
logging.info(net)

file_exists = False
if os.path.exists(save_path + f"Guided_Bprop_{net_name}.mat"):
    logging.warning("GuidedBackprop for this architecture already exists")
    file_exists = True

if mode == "overwrite" or not file_exists:
    # We create loaders and datasets (see dataloaders.py)
    _, validloader, _ = create_loaders(
        data_path,
        train_size,
        batch_size,
        max_subj,
        ch_type,
        data_type,
        seed=seed,
        num_workers=num_workers,
        chunkload=chunkload,
        include=(0, 1, 0),
        ages=ages,
    )

    model_filepath = save_path + f"{net_name}.pt"
    logging.info(net.name)
    _, net_state, _ = load_checkpoint(model_filepath)
    net.load_state_dict(net_state)
    gbp = GuidedBackprop(net)
    gbps = []
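    # Hypothetical sketch (not from this script): one way gbps could be filled
    # from the validation loader. It assumes GuidedBackprop exposes a
    # generate_gradients(input, target) method, as in common PyTorch
    # CNN-visualization toolkits; the actual API in this repo may differ.
    # for X, y in validloader:
    #     for sample, label in zip(X, y):
    #         grads = gbp.generate_gradients(sample.unsqueeze(0).to(device), label)
    #         gbps.append(grads)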
).to(device)

lin_size = input_size[0] * input_size[1] * input_size[2]
if torchsum:
    logging.info(summary(net, (1, lin_size)))
else:
    logging.info(net)

# We create loaders and datasets (see dataloaders.py)
trainloader, validloader, testloader = create_loaders(
    data_path,
    train_size,
    batch_size,
    max_subj,
    ch_type,
    data_type,
    seed=seed,
    num_workers=num_workers,
    chunkload=chunkload,
    debug=debug,
    printmem=printmem,
)

if mode == "overwrite":
    save = True
    load = False
elif mode in ("continue", "evaluate"):
    save = True
    load = True
else:
    save = False
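# Illustration only (it mirrors the checkpoint calls used in the
# GuidedBackprop script; the exact resume logic is not shown here): when
# load is True, training would typically restart from the saved weights, e.g.
# if load:
#     model_filepath = save_path + f"{net_name}.pt"
#     _, net_state, _ = load_checkpoint(model_filepath)
#     net.load_state_dict(net_state)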
#     num_workers=num_workers,
#     chunkload=chunkload,
#     debug=debug,
#     printmem=printmem,
#     include=(1, 1, 0),
#     ages=ages,
#     dattype="task",
# )
rest_train, rest_valid, rest_test = create_loaders(
    data_path,
    train_size,
    batch_size,
    max_subj,
    ch_type,
    data_type,
    seed=seed,
    num_workers=num_workers,
    chunkload=chunkload,
    debug=debug,
    printmem=printmem,
    include=(1, 1, 0),
    ages=ages,
)

# Testing whether ERM works as intended: feed the same rest-data loader twice,
# so the multi-domain run should match plain single-domain training (see the
# note after the model summary below).
train_loaders = [rest_train, rest_train]
valid_loaders = [rest_valid, rest_valid]

if torchsum:
    logging.info(summary(net, input_size))
else:
    logging.info(net)
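# Note on the duplicated-loader test above (reasoning, not repo code): with
# two identical domains, the ERM objective (loss_1 + loss_2) / 2 reduces to
# the single-domain loss, so this run should reproduce plain training on
# rest_train. A hypothetical consumer of train_loaders (the real trainer may
# differ, and `criterion` is assumed here) could iterate domains in lockstep:
# for (x1, y1), (x2, y2) in zip(*train_loaders):
#     loss = (criterion(net(x1), y1) + criterion(net(x2), y2)) / 2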
    nchan,
    linear,
    dropout,
    dropout_option,
).to(device)

# We create loaders and datasets (see dataloaders.py)
task_train, task_valid, _ = create_loaders(
    data_path,
    train_size,
    batch_size,
    max_subj,
    ch_type,
    data_type,
    seed=seed,
    num_workers=num_workers,
    chunkload=chunkload,
    debug=debug,
    printmem=printmem,
    include=(1, 1, 0),
    ages=ages,
    dattype="task",
    samples=samples,
    infinite=True,
)

rest_train, rest_valid, _ = create_loaders(
    data_path,
    train_size,
    batch_size,
    max_subj,
    ch_type,
    data_type,
    dropout=dropout,
).to(device)

if times:
    # Overrides default mode!
    # Tests different values of workers and batch sizes to check which is the fastest.
    num_workers = [16, 32, 64, 128]
    batch_sizes = [16, 32]
    perfs = []
    for nw, bs in product(num_workers, batch_sizes):
        tl, vl, _ = create_loaders(
            data_path,
            train_size,
            bs,
            max_subj,
            ch_type,
            data_type,
            num_workers=nw,
            debug=debug,
            chunkload=chunkload,
        )
        tpb, et = train(net, tl, vl, "", lr=learning_rate, timing=True)
        perfs.append((nw, bs, tpb, et))
    for x in sorted(perfs, key=lambda x: x[-1]):
        print("\n", (x[0], x[1], nt(x[2]), nt(x[3])))
else:
    print(net)
    # We create loaders and datasets (see dataloaders.py)