    given_config = config
    reloaded_config_path = os.path.join(given_config.out_dir, config_name)
    print("Loading restarting config from: %s" % reloaded_config_path)
    with open(reloaded_config_path, "rb") as config_f:
        config = pickle.load(config_f)
    assert (config.model_ind == given_config.model_ind)
    config.restart = True
    config.restart_from_best = given_config.restart_from_best

    # copy over new num_epochs and lr schedule
    config.num_epochs = given_config.num_epochs
    config.lr_schedule = given_config.lr_schedule

else:
    print("Config: %s" % config_to_str(config))

# Model ------------------------------------------------------------------------

dataloaders, mapping_assignment_dataloader, mapping_test_dataloader = \
    cluster_create_dataloaders(config)

net = archs.__dict__[config.arch](config)
if config.restart:
    model_path = os.path.join(config.out_dir, net_name)
    net.load_state_dict(
        torch.load(model_path, map_location=lambda storage, loc: storage))
net.cuda()
net = torch.nn.DataParallel(net)
net.train()
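
The restart branch above assumes that an earlier run has already written the pickled config and the network weights into config.out_dir. Below is a minimal sketch of that save side; the file names follow the setup() function in Example #3, and save_latest itself is a hypothetical helper, not part of the IIC code.

import os
import pickle

import torch


def save_latest(config, net):
    # Hypothetical helper mirroring the restart branch above: pickle the config
    # and save the raw state_dict so that pickle.load() and
    # net.load_state_dict(torch.load(...)) work on restart.
    with open(os.path.join(config.out_dir, "config.pickle"), "wb") as config_f:
        pickle.dump(config, config_f)
    state = net.module.state_dict() if hasattr(net, "module") else net.state_dict()
    torch.save(state, os.path.join(config.out_dir, "latest_net.pytorch"))
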
Example #2
if config.restart:
    config_name = "config.pickle"
    dict_name = "latest.pytorch"

    given_config = config
    reloaded_config_path = os.path.join(given_config.out_dir, config_name)
    print("Loading restarting config from: %s" % reloaded_config_path)
    with open(reloaded_config_path, "rb") as config_f:
        config = pickle.load(config_f)
    assert (config.model_ind == given_config.model_ind)
    config.restart = True

    # copy over new num_epochs and lr schedule
    config.num_epochs = given_config.num_epochs
    config.lr_schedule = given_config.lr_schedule
else:
    print("Given config: %s" % config_to_str(config))

# Model ------------------------------------------------------


def train():
    dataloaders_head_A, mapping_assignment_dataloader, mapping_test_dataloader = \
      segmentation_create_dataloaders(config)
    # unlike the clustering datasets: both heads share the same dataloaders
    dataloaders_head_B = dataloaders_head_A

    net = archs.__dict__[config.arch](config)
    if config.restart:
        checkpoint = torch.load(os.path.join(config.out_dir, dict_name),
                                map_location=lambda storage, loc: storage)
        net.load_state_dict(checkpoint["net"])
    net.cuda()
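
Example #2 reloads a checkpoint dictionary keyed by "net" (stored as latest.pytorch) rather than a bare state_dict. Here is a sketch of the matching save call under the same assumptions; the optimiser and epoch entries are guesses, not taken from the example.

import os

import torch


def save_checkpoint(config, net, optimiser, epoch):
    # Hypothetical counterpart of the torch.load(...)["net"] call above.
    torch.save({"net": net.state_dict(),
                "optimiser": optimiser.state_dict(),  # assumption
                "epoch": epoch},  # assumption
               os.path.join(config.out_dir, "latest.pytorch"))
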
Example #3
File: cluster.py, Project: hendraet/IIC

import os
import pickle

import torch

# archs, config_to_str and get_opt come from the IIC codebase and are assumed
# to be importable alongside this function.


def setup(config):
    if config.mode == "IID":
        assert ("TwoHead" in config.arch)
        # At least one output k must equal the ground-truth k, and every output k must be >= gt_k
        assert any(k == config.gt_k for k in config.output_ks)
        assert all(k >= config.gt_k for k in config.output_ks)
        config.output_k = config.gt_k
        config.eval_mode = "hung"
        config.twohead = True
    elif config.mode == "IID+":
        assert len(
            config.output_ks) == 1 and config.output_ks[0] >= config.gt_k
        config.output_k = config.output_ks[0]
        config.eval_mode = "orig"
        config.twohead = False
        config.double_eval = False
    else:
        raise NotImplementedError

    if config.sobel:
        if not config.include_rgb:
            config.in_channels = 2
        else:
            config.in_channels = 5
    else:
        config.in_channels = 1
        config.train_partitions = [True, False]
        config.mapping_assignment_partitions = [True, False]
        config.mapping_test_partitions = [True, False]

    assert (config.batch_sz % config.num_dataloaders == 0)
    # integer division so the per-dataloader batch size stays an int
    config.dataloader_batch_sz = config.batch_sz // config.num_dataloaders

    config.out_dir = os.path.join(config.out_root, str(config.model_ind))
    if not os.path.exists(config.out_dir):
        os.makedirs(config.out_dir)

    if config.restart:
        config_name = "config.pickle"
        net_name = "latest_net.pytorch"
        opt_name = "latest_optimiser.pytorch"

        if config.restart_from_best:
            config_name = "best_config.pickle"
            net_name = "best_net.pytorch"
            opt_name = "best_optimiser.pytorch"

        given_config = config
        reloaded_config_path = os.path.join(given_config.out_dir, config_name)
        print("Loading restarting config from: %s" % reloaded_config_path)
        with open(reloaded_config_path, "rb") as config_f:
            config = pickle.load(config_f)
        if hasattr(config, "num_sub_heads"):
            config.num_subheads = config.num_sub_heads
        assert given_config.test_code or (config.model_ind
                                          == given_config.model_ind)
        config.restart = True
        config.restart_from_best = given_config.restart_from_best

        if given_config.dataset_root is not None:
            config.dataset_root = given_config.dataset_root
        if given_config.out_root is not None:
            config.out_root = given_config.out_root
            config.out_dir = os.path.join(given_config.out_root,
                                          str(given_config.model_ind))

        # copy over new num_epochs and lr schedule
        config.num_epochs = given_config.num_epochs
        config.lr_schedule = given_config.lr_schedule
        config.save_progression = given_config.save_progression
        config.result_dir = given_config.result_dir

        if not hasattr(config, "cutout"):
            config.cutout = False
            config.cutout_p = 0.5
            config.cutout_max_box = 0.5

        if not hasattr(config, "batchnorm_track"):
            config.batchnorm_track = True  # before we added in false option

        config.plot_cluster_stats = given_config.plot_cluster_stats
        if hasattr(given_config, "batch_sz") and hasattr(
                given_config, "num_dataloaders"):
            assert (given_config.batch_sz % given_config.num_dataloaders == 0)
            config.batch_sz = given_config.batch_sz
            config.num_dataloaders = given_config.num_dataloaders
            config.dataloader_batch_sz = config.batch_sz // config.num_dataloaders
    else:
        print("Config: %s" % config_to_str(config))
        net_name = None
        opt_name = None

    net = archs.__dict__[config.arch](config)
    if config.restart:
        model_path = os.path.join(config.out_dir, net_name)
        print("Model path: %s" % model_path)
        net.load_state_dict(
            torch.load(model_path, map_location=lambda storage, loc: storage))

    net.cuda()
    net = torch.nn.DataParallel(net)
    net.train()

    optimiser = get_opt(config.opt)(net.module.parameters(), lr=config.lr)
    if config.restart:
        opt_path = os.path.join(config.out_dir, opt_name)
        optimiser.load_state_dict(torch.load(opt_path))

    return config, net, optimiser
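
For reference, here is a sketch of how setup() might be driven from the command line. The flag names mirror the attributes read inside setup(), but the defaults and the argparse interface are assumptions, not the IIC project's own entry point.

import argparse

parser = argparse.ArgumentParser()
parser.add_argument("--model_ind", type=int, required=True)
parser.add_argument("--arch", type=str, default="ClusterNet5gTwoHead")
parser.add_argument("--mode", type=str, default="IID")
parser.add_argument("--gt_k", type=int, default=10)
parser.add_argument("--output_ks", type=int, nargs="+", default=[10, 70])
parser.add_argument("--opt", type=str, default="Adam")
parser.add_argument("--lr", type=float, default=1e-4)
parser.add_argument("--lr_schedule", type=int, nargs="+", default=[])
parser.add_argument("--num_epochs", type=int, default=1000)
parser.add_argument("--batch_sz", type=int, default=660)
parser.add_argument("--num_dataloaders", type=int, default=3)
parser.add_argument("--out_root", type=str, default="out")
parser.add_argument("--dataset_root", type=str, default=None)
parser.add_argument("--sobel", action="store_true")
parser.add_argument("--include_rgb", action="store_true")
parser.add_argument("--restart", action="store_true")
parser.add_argument("--restart_from_best", action="store_true")
parser.add_argument("--test_code", action="store_true")
parser.add_argument("--save_progression", action="store_true")
parser.add_argument("--result_dir", type=str, default=None)
parser.add_argument("--plot_cluster_stats", action="store_true")
config = parser.parse_args()

# setup() reloads or patches the config, builds the network and the optimiser.
config, net, optimiser = setup(config)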