Ejemplo n.º 1
def pretrain(args,
             config,
             X_all,
             lead_time_idx,
             input_dim=None,
             onehot_dim=None):
    """Build a ModelWrapper and pretrain it on the full dataset.

    Args:
        args: runtime arguments; `args.use_onehot` is forwarded to the wrapper.
        config: model/training configuration passed through to ModelWrapper.
        X_all: full training data (paired with `lead_time_idx` for train()).
        lead_time_idx: lead-time index identifying this model instance.
        input_dim: optional input feature dimension (wrapper default if None).
        onehot_dim: optional one-hot feature dimension (wrapper default if None).

    Returns:
        The trained ModelWrapper instance.
    """
    wrapper = ModelWrapper(args, config, args.use_onehot, input_dim,
                           onehot_dim, lead_time_idx)
    # train() expects a (lead_time_idx, data) pair.
    wrapper.train((lead_time_idx, X_all))
    return wrapper
Ejemplo n.º 2
            else:
                aggregated_value["score"] = aggregated_value["accuracy"]

            if best_val_score[val] < aggregated_value["score"]:
                best_val_score[val] = aggregated_value["score"]

            aggregated_value["best_score"] = best_val_score[val]
            aggregated_value["val"] = val

            print(json.dumps(aggregated_value, sort_keys=True, indent=4),
                  flush=True)
            log_f.write(json.dumps(aggregated_value, sort_keys=True) + "\n")
            log_f.flush()

            print(f"################ {val} iteration end ################")


# --- Script-level fine-tuning driver loop ---
# NOTE(review): relies on `model`, `train`, `val`, `model_config`, and
# `downsteam_task_config` (sic — typo preserved; it must match the
# definition elsewhere in the file) being defined earlier.
if model_config["mixed_precision"]:
    # GradScaler for automatic mixed-precision training; presumably the
    # train() closure reads this module-level `scaler` — TODO confirm.
    scaler = torch.cuda.amp.GradScaler()

idx = 0  # NOTE(review): appears unused in this visible chunk — verify before removing.
# Best score seen so far for each validation split, keyed by split name.
best_val_score = {val: 0.0 for val in downsteam_task_config["val"]}

for epoch in range(downsteam_task_config["epoch"]):
    model.train()  # enable training-mode behavior (dropout, batchnorm updates)
    train()

    model.eval()  # switch to evaluation-mode behavior
    with torch.no_grad():  # disable autograd tracking during validation
        val()
Ejemplo n.º 3
from model import ModelWrapper


# Instantiate the wrapper with its default configuration and run training.
# NOTE(review): train() is called with no arguments here, unlike other
# examples that pass data — confirm ModelWrapper supports both call forms.
model_wrapper = ModelWrapper()
model_wrapper.train()