Example #1
    config.restart = True

    # copy over new num_epochs, lr schedule and save_progression flag
    config.num_epochs = given_config.num_epochs
    config.lr_schedule = given_config.lr_schedule
    config.save_progression = given_config.save_progression

    if not hasattr(config, "batchnorm_track"):
        config.batchnorm_track = True  # default for configs saved before this option existed

    if not hasattr(config, "lamb_A"):
        config.lamb_A = config.lamb
        config.lamb_B = config.lamb

else:
    print("Config: %s" % config_to_str(config))


# Model ------------------------------------------------------------------------
def train(render_count=-1):
    dataloaders_head_A, dataloaders_head_B, \
    mapping_assignment_dataloader, mapping_test_dataloader = \
      cluster_twohead_create_dataloaders(config)

    # look the architecture up by name in the archs module and build it from the config
    net = archs.__dict__[config.arch](config)
    if config.restart:
        model_path = os.path.join(config.out_dir, net_name)
        # map_location keeps the loaded tensors on CPU, so the checkpoint opens
        # even on a machine without the GPU it was saved from
        net.load_state_dict(
            torch.load(model_path, map_location=lambda storage, loc: storage))

    net.cuda()
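
The top of this example is truncated: the lines above apparently continue an "if config.restart:" branch whose header, and the config-reloading code shown more fully in Example #2, is missing, which is why the "else:" looks orphaned. Below is a minimal, self-contained sketch of that restart pattern; the argparse.Namespace configs, the file name "config.pickle", and the concrete field values are placeholders for illustration, not taken from the original script.

# Minimal sketch of the restart/reload pattern; names and values below are
# placeholders, not taken from the original script.
import argparse
import os
import pickle
import tempfile

out_dir = tempfile.mkdtemp()

# pretend an earlier run already saved its config
old_config = argparse.Namespace(model_ind=555, num_epochs=1000, lr_schedule=[600])
with open(os.path.join(out_dir, "config.pickle"), "wb") as f:
    pickle.dump(old_config, f)

# new invocation: restart the same model, but with a longer schedule
config = argparse.Namespace(model_ind=555, restart=True, out_dir=out_dir,
                            num_epochs=3200, lr_schedule=[600, 2000])

if config.restart:
    given_config = config
    with open(os.path.join(given_config.out_dir, "config.pickle"), "rb") as f:
        config = pickle.load(f)                      # old settings win ...
    assert config.model_ind == given_config.model_ind
    config.restart = True
    config.num_epochs = given_config.num_epochs      # ... except the fields
    config.lr_schedule = given_config.lr_schedule    # copied over explicitly

    # default attributes that did not exist when the old config was saved
    if not hasattr(config, "batchnorm_track"):
        config.batchnorm_track = True
else:
    print("Given config: %s" % vars(config))

print(config.num_epochs, config.lr_schedule, config.batchnorm_track)

The reloaded config wins for everything except the handful of fields a restart is allowed to change (epoch count, learning-rate schedule, and any attributes added after the original run was saved), so an old run can be resumed with a longer schedule without repeating every original flag.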
Example #2
  config_name = "config.pickle"
  dict_name = "latest.pytorch"

  given_config = config
  reloaded_config_path = os.path.join(given_config.out_dir, config_name)
  print("Loading restarting config from: %s" % reloaded_config_path)
  with open(reloaded_config_path, "rb") as config_f:
    config = pickle.load(config_f)
  assert (config.model_ind == given_config.model_ind)
  config.restart = True

  # copy over new num_epochs and lr schedule
  config.num_epochs = given_config.num_epochs
  config.lr_schedule = given_config.lr_schedule
else:
  print("Given config: %s" % config_to_str(config))


# Model ------------------------------------------------------

def train():
  print("inside train")
  exit()
  dataloaders_head_A, mapping_assignment_dataloader, mapping_test_dataloader = \
    segmentation_create_dataloaders(config)
  dataloaders_head_B = dataloaders_head_A  # unlike for clustering datasets

  net = archs.__dict__[config.arch](config)
  if config.restart:
    # map_location keeps the loaded tensors on CPU, whatever device they were saved from
    loaded_dict = torch.load(os.path.join(config.out_dir, dict_name),
                             map_location=lambda storage, loc: storage)
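
The example stops right after the checkpoint is loaded, so how the loaded dictionary is consumed is not shown. The sketch below shows one common round trip for such a checkpoint; the key names ("net", "optimiser"), the toy model and the optimiser are assumptions for illustration, not taken from the original script.

# Sketch of saving and restoring a checkpoint dict; key names and the toy model
# below are assumptions, not taken from the original script.
import os
import tempfile

import torch
import torch.nn as nn

out_dir = tempfile.mkdtemp()
dict_name = "latest.pytorch"

net = nn.Linear(4, 2)
optimiser = torch.optim.Adam(net.parameters(), lr=1e-3)

# save: bundle network and optimiser state into a single dict
torch.save({"net": net.state_dict(), "optimiser": optimiser.state_dict()},
           os.path.join(out_dir, dict_name))

# restart: map_location forces everything onto CPU, so a checkpoint written on a
# GPU machine still opens on a CPU-only one before any .cuda() call
loaded_dict = torch.load(os.path.join(out_dir, dict_name),
                         map_location=lambda storage, loc: storage)
net.load_state_dict(loaded_dict["net"])
optimiser.load_state_dict(loaded_dict["optimiser"])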