Example #1
inputs, targets = inputs.to(device), targets.to(device)
# Permute to the channel-first layout (batch, features, time) expected by the convolutional layers
inputs = Variable(inputs.permute(0, 2, 1)).contiguous()

output = model(inputs)

# %%
# ---------------------------------------------------------------------
# TRAINING THE MODEL
# ---------------------------------------------------------------------
# Make sure the model architecture loaded in train_cgm matches the hyperparameter configuration
pre_train_model = model_path / 'model-folder-name'
train_cgm(config,
          max_epochs=30,
          grace_period=5,
          n_epochs_stop=10,
          data_obj=data_obj,
          useRayTune=False,
          checkpoint_dir=pre_train_model)

# Load best model
model_state, optimizer_state = torch.load(code_path / 'src' /
                                          'model_state_tmp' / 'checkpoint')
model.load_state_dict(model_state, strict=False)

# Copy the trained model to model path
copyfile(code_path / 'src' / 'model_state_tmp' / 'checkpoint',
         model_path_id / 'checkpoint')

with open(code_path / 'src' / 'model_state_tmp' / 'hyperPars.json', 'w') as fp:
    json.dump(config, fp)
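
A minimal sketch of the layout change performed by the permute above, assuming the loader yields batches as (batch, time, features) and the convolutional model expects (batch, channels, time); the sizes below (32 samples, 16 past steps, 4 features) are illustrative only:

# Hypothetical shape check, not part of the original script
import torch

x = torch.randn(32, 16, 4)           # (batch, n_steps_past, n_features) as produced by the loader
x = x.permute(0, 2, 1).contiguous()  # -> (batch, n_features, n_steps_past)
print(x.shape)                       # torch.Size([32, 4, 16]); Conv1d reads dim 1 as channels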
Example #2
    # Build the model from the tuned hyperparameters stored as JSON (e.g. the layer sizes h1 and h2)
    with open(par_file) as json_file:
        optHyps = json.load(json_file)

    model = DilatedNet(h1=optHyps["h1"], h2=optHyps["h2"])

    # Window the CGM series: 16 past samples form the model input and the next 6 samples
    # form the prediction target; allowed_gap presumably bounds tolerated gaps in the series
    data_obj = dataLoader(data_pars,
                          features,
                          n_steps_past=16,
                          n_steps_future=6,
                          allowed_gap=10,
                          scaler=StandardScaler())

    train_cgm(optHyps,
              max_epochs=MAX_NUM_EPOCHS_FINAL,
              grace_period=GRACE_PERIOD_FINAL,
              n_epochs_stop=N_EPOCHS_STOP_FINAL,
              data_obj=data_obj,
              useRayTune=False)
    # train_cgm(optHyps, max_epochs= 3, grace_period=1, n_epochs_stop=2, data_obj=data_obj, useRayTune=False)

    # Load best model state
    model_state, optimizer_state = torch.load(code_path / 'src' /
                                              'model_state_tmp' / 'checkpoint')
    model.load_state_dict(model_state)

    current_time = datetime.datetime.now().strftime('%Y-%m-%d_%H%M%S')
    user = getpass.getuser()
    model_id = f'id_{current_time}_{user}'
    # The experiment id takes precedence over the generated timestamp-based id
    model_id = experiment_id

    model_figure_path = figure_path / model_id
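
The torch.load calls in these examples unpack a (model_state, optimizer_state) tuple, which suggests the checkpoint is written roughly as in the sketch below; this is an assumption about how train_cgm stores its checkpoint, not code taken from it:

# Hypothetical save side of the checkpoint consumed above
import torch

def save_checkpoint(model, optimizer, path):
    # Store both state dicts in a single tuple so loading can unpack them together
    torch.save((model.state_dict(), optimizer.state_dict()), path)

# Loading then mirrors the examples:
# model_state, optimizer_state = torch.load(path)
# model.load_state_dict(model_state)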
Example #3
device = torch.device('cuda:0' if torch.cuda.is_available() else 'cpu')
inputs, targets = data
inputs, targets = inputs.to(device), targets.to(device)
# Permute to the channel-first layout (batch, features, time) expected by the convolutional layers
inputs = Variable(inputs.permute(0, 2, 1)).contiguous()

output = model(inputs)

# %%
# ---------------------------------------------------------------------
# TRAINING THE MODEL
# ---------------------------------------------------------------------
# Make sure the model architecture loaded in train_cgm matches the hyperparameter configuration
train_cgm(config,
          max_epochs=1,
          grace_period=5,
          n_epochs_stop=20,
          data_obj=data_obj,
          useRayTune=False)

# Number of parameters in each trainable tensor of the model
model_parameters = filter(lambda p: p.requires_grad, model.parameters())
params = [np.prod(p.size()) for p in model_parameters]
print(params)
# Load best model
model_state, optimizer_state = torch.load(code_path / 'src' /
                                          'model_state_tmp' / 'checkpoint')
# model_state, optimizer_state = torch.load(best_epoch_checkpoint_file_name)
model.load_state_dict(model_state, strict=False)

# Copy the trained model to model path
copyfile(code_path / 'src' / 'model_state_tmp' / 'checkpoint',
         model_path_id / 'checkpoint')
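
The parameter listing above prints one count per tensor; if a single total of trainable parameters is wanted, a standard PyTorch one-liner (not from the original repository) is:

# Total number of trainable parameters, equivalent to summing the list printed above
n_trainable = sum(p.numel() for p in model.parameters() if p.requires_grad)
print(f'Trainable parameters: {n_trainable}')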