def train():
    """Fit a relations agglomerator and persist it to disk.

    Loads the relation/profile training data from ``args.i`` (the other
    three items returned by the loader are unused here), trains a fresh
    ``relations_agglomerator``, and saves the fitted model to ``args.o``.
    """
    loaded = utils.dataloader.load_data(args.i, train=True)
    # Only positions 2 and 3 of the 5-tuple are needed for this model.
    relation_data, profile_data = loaded[2], loaded[3]

    model = relations_agglomerator()
    model.train(relation_data, profile_data)

    save_model("relation_agglo", args.o, model)
    print("Model saved")
# Example #2 (score: 0) — scraper-artifact boundary; the fragment below comes from a different source file.
    # NOTE(review): this fragment is the interior of a training entry point whose
    # `def` line lies outside this chunk. Names such as args, process_dim, device,
    # tmax, the train/dev/test tensors, BATCH_SIZE, EPOCHS, USE_CUDA, MODEL_TOKEN,
    # hidden_size, chosen_file and start_time are presumably bound earlier in the
    # enclosing function — confirm upstream before editing.
    model = None
    if MODEL_TOKEN == 'sahp':
        # detect_anomaly() makes autograd raise (with a traceback to the op)
        # whenever a backward pass produces NaN/inf — slower, debug-only mode.
        with autograd.detect_anomaly():
            # Bundle all data splits and hyperparameters into a single tuple
            # because train_eval_sahp takes one packed argument.
            params = args, process_dim, device, tmax, \
                     train_times_tensor, train_seq_types, train_seq_lengths, \
                     dev_times_tensor, dev_seq_types, dev_seq_lengths, \
                     test_times_tensor, test_seq_types, test_seq_lengths, \
                     BATCH_SIZE, EPOCHS, USE_CUDA
            model = train_eval_sahp(params)

    else:
        # Unsupported model token: terminate the process immediately.
        exit()

    if args.save_model:
        # Model file dump
        SAVED_MODELS_PATH = os.path.abspath('saved_models')
        os.makedirs(SAVED_MODELS_PATH, exist_ok=True)
        # print("Saved models directory: {}".format(SAVED_MODELS_PATH))

        date_format = "%Y%m%d-%H%M%S"
        now_timestamp = datetime.datetime.now().strftime(date_format)
        extra_tag = "{}".format(args.task)
        # filename_base is assembled here but unused below: save_model receives
        # the individual pieces instead — possibly dead code; verify.
        filename_base = "{}-{}_hidden{}-{}".format(MODEL_TOKEN, extra_tag,
                                                   hidden_size, now_timestamp)
        from utils.save_model import save_model
        save_model(model, chosen_file, extra_tag, hidden_size, now_timestamp,
                   MODEL_TOKEN)

    print('Done! time elapsed %.2f sec for %d epoches' %
          (time.time() - start_time, EPOCHS))
def save_model_(dir, method, model, timestamp):
    """Persist *model* into a folder named ``<method name>_<timestamp>`` under *dir*.

    Delegates the actual serialization to the module-level ``save_model``.
    """
    target = os.path.join(dir, "{}_{}".format(method['name'], timestamp))
    save_model(model, target)
            # NOTE(review): fragment starts mid-branch; the enclosing loop/function,
            # and bindings such as method, providers, num_epochs, SAVE_PREDS,
            # SAVE_MODELS and the *_dir paths, are outside this chunk — confirm upstream.
            create_dir(method_model_dir)

        if SAVE_PREDS:
            create_dir(method_pred_dir)

        # Wall-clock training time, normalized to a per-epoch figure.
        start_time = time()
        classifier, hist = train(method, train_provider, val_provider)
        train_time = (time() - start_time) / num_epochs

        # save_train_results(method_results_dir, hist, train_time)

        # Evaluate on the test split; test() returns the full metric bundle
        # (overall/per-class accuracy and IoU, confusion matrix) plus the raw
        # data, ground-truth part labels and predicted part labels.
        start_time = time()
        acc, per_class_acc, mean_class_acc, mIoU, per_class_iou, mean_class_iou, conf_mat, data_, part_labels_, \
           pred_part_labels_ = \
            test(classifier, test_provider, method=method, dataset=dataset)
        test_time = (time() - start_time)

        save_test_results(method_results_dir, acc, per_class_acc,
                          mean_class_acc, mIoU, per_class_iou, mean_class_iou,
                          conf_mat, test_time)

        # save_results(method_results_dir, hist, conf_mat, test_acc, train_time, test_time)

        if SAVE_PREDS:
            # Dump predictions alongside ground truth for offline inspection.
            save_part_labels(method_pred_dir,
                             dataset['name'] + '_' + method['name'],
                             pred_part_labels_, part_labels_, data_)

        if SAVE_MODELS:
            save_model(method_model_dir, classifier)
# Example #5 (score: 0) — scraper-artifact boundary; the fragment below comes from a different source file.
# NOTE(review): this chunk begins mid `if/elif` chain on args.model — the earlier
# branches and the bindings of model, optimizer, seq_times/seq_types/seq_lengths,
# BATCH_SIZE, EPOCHS, process_dim, MODEL_NAME, hidden_size and data_files are
# outside this view; confirm upstream.
elif args.model == 'lstm':
    loss = train_lstm(
        model,
        optimizer,
        seq_times,
        seq_types,
        seq_lengths,
        -1,  # tmax actually doesn't matter
        BATCH_SIZE,
        EPOCHS,
        use_cuda=args.cuda)

# Model file dump
SAVED_MODELS_PATH = os.path.abspath('saved_models')
os.makedirs(SAVED_MODELS_PATH, exist_ok=True)
# print("Saved models directory: {}".format(SAVED_MODELS_PATH))

# Timestamped filename stem: <MODEL_NAME>-<process_dim>d_hidden<size>-<YYYYmmdd-HHMMSS>
date_format = "%Y%m%d-%H%M%S"
now_timestamp = datetime.datetime.now().strftime(date_format)
extra_tag = "{}d".format(process_dim)
filename_base = "{}-{}_hidden{}-{}".format(MODEL_NAME, extra_tag, hidden_size,
                                           now_timestamp)

save_model(model, data_files, extra_tag, hidden_size, now_timestamp,
           MODEL_NAME)

# Plot loss and save
# NOTE(review): assumes a logs/ directory already exists — savefig will raise otherwise.
filename_loss_plot = "logs/loss_plot_" + filename_base + ".png"
fig = plot_loss(EPOCHS, loss)
fig.savefig(filename_loss_plot)