# restart branch: reload the config pickled by the original run
# (config_name, like net_name and opt_name below, is assumed defined earlier in the script)
if given_config.restart:
    with open(os.path.join(given_config.out_dir, config_name), "rb") as config_f:
        config = pickle.load(config_f)
    assert config.model_ind == given_config.model_ind
    config.restart = True
    config.restart_from_best = given_config.restart_from_best

    # copy over new num_epochs and lr schedule
    config.num_epochs = given_config.num_epochs
    config.lr_schedule = given_config.lr_schedule

else:
    print("Config: %s" % config_to_str(config))

# Model ------------------------------------------------------------------------

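# training dataloaders, plus the dataloaders used to assign and to evaluate
# the mapping from learned clusters to ground-truth labels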
dataloaders, mapping_assignment_dataloader, mapping_test_dataloader = \
    cluster_create_dataloaders(config)

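# look up the architecture class by name in the archs module and instantiate it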
net = archs.__dict__[config.arch](config)
if config.restart:
    assert net_name is not None
    model_path = os.path.join(config.out_dir, net_name)
    # map_location keeps the loaded tensors on CPU; they move to GPU via .cuda() below
    net.load_state_dict(
        torch.load(model_path, map_location=lambda storage, loc: storage))
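# move the model to GPU, wrap it for multi-GPU data parallelism, and set train mode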
net.cuda()
net = torch.nn.DataParallel(net)
net.train()

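# the optimiser acts on net.module's parameters since net is wrapped in DataParallel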
optimiser = get_opt(config.opt)(net.module.parameters(), lr=config.lr)
if config.restart:
    optimiser.load_state_dict(
        torch.load(os.path.join(config.out_dir, opt_name)))