Example #1
import torch

torch.manual_seed(1)
from model import Trainer

if __name__ == '__main__':
    FEATURES = 4
    WIDTH = 4
    DEVICE = torch.device('cpu')
    # Synthetic data: 2**20 samples, each a 16 x FEATURES block scaled by WIDTH.
    dataset_inputs = torch.randn((2 ** 20, 16, FEATURES)) * WIDTH
    # Target: mean cosine of the features times their root-mean-square.
    dataset_outputs = dataset_inputs.cos().mean(-1) * dataset_inputs.square().mean(-1).sqrt()
    model = Trainer(FEATURES, (dataset_inputs, dataset_outputs), DEVICE, max_loss=(WIDTH*FEATURES)**2)
    model.fit(100)
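The model.Trainer class imported above is not part of this listing; below is a minimal sketch of an interface compatible with the call used here, assuming a small MLP regressor trained with full-batch MSE. The network, optimizer, and max_loss handling are assumptions, not the actual model.py.

# Hypothetical sketch only; the real model.Trainer is not shown in this example.
import torch
from torch import nn

class Trainer:
    def __init__(self, features, dataset, device, max_loss=float('inf')):
        self.inputs, self.targets = dataset               # (N, 16, features), (N, 16)
        self.device = device
        self.max_loss = max_loss                          # abort threshold for diverging runs
        self.net = nn.Sequential(nn.Linear(features, 64),
                                 nn.ReLU(),
                                 nn.Linear(64, 1)).to(device)
        self.opt = torch.optim.Adam(self.net.parameters())

    def fit(self, epochs):
        loss_fn = nn.MSELoss()
        for epoch in range(epochs):
            pred = self.net(self.inputs.to(self.device)).squeeze(-1)
            loss = loss_fn(pred, self.targets.to(self.device))
            if loss.item() > self.max_loss:
                raise RuntimeError(f'loss {loss.item():.3f} exceeded max_loss at epoch {epoch}')
            self.opt.zero_grad()
            loss.backward()
            self.opt.step()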
Example #2
File: run.py Project: boreshkinai/fc-gaga
print("*********************************")

dataset = Dataset(name=hyperparams_dict["dataset"],
                  horizon=hyperparams_dict["horizon"],
                  history_length=hyperparams_dict["history_length"],
                  path=DATADIR)

hyperparams_dict["num_nodes"] = dataset.num_nodes
hyperparams = Parameters(**hyperparams_dict)

print("*********************************")
print("TRAINING MODELS")
print("*********************************")

trainer = Trainer(hyperparams=hyperparams, logdir=LOGDIR)
trainer.fit(dataset=dataset)

print("*********************************")
print("COMPUTING METRICS")
print("*********************************")

early_stop_mae_h_repeats = dict()
early_stop_mape_h_repeats = dict()
early_stop_rmse_h_repeats = dict()
early_stop_mae_h_ave = dict()
early_stop_mape_h_ave = dict()
early_stop_rmse_h_ave = dict()
for i, h in enumerate(trainer.history):
    # Early stopping: take the epoch with the lowest validation MAE and report its test MAE.
    early_stop_idx = np.argmin(h['mae_val'])
    early_stop_mae = np.round(h['mae_test'][early_stop_idx], decimals=3)
    print(f"Early stop test error model {trainer.folder_names[i]}:", "Avg MAE", early_stop_mae)
Example #3
                         log_dir=log_dir,
                         num_resblock=args.num_resblock)
    trainer.summary()

    try:
        if weights_path is not None:
            print('loading weights')
            trainer.load_checkpoint(weights_path)
        else:
            print('no weights for initialization are available')
    except Exception as e:
        print(e)

    if args.train_generator:
        trainer.fit(train_dataset=train_ds,
                    valid_dataset=valid_ds,
                    epochs=args.epochs,
                    valid_lr=valid_lr,
                    valid_hr=valid_hr)
        print('training finished, saving model now')
        trainer.save_model('_only_generator')

    if args.train_gan:
        trainer.train_gan(train_dataset=train_ds,
                          valid_dataset=valid_ds,
                          epochs=args.epochs,
                          valid_lr=valid_lr,
                          valid_hr=valid_hr)
        print('training finished, saving model now')
        trainer.save_model()
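The excerpt relies on an args namespace and a trainer constructed above the visible lines. A minimal argparse sketch covering only the flags the excerpt uses is given below; the flag names follow the attributes referenced above, but the defaults and help text are assumptions.

# Assumed argument parser; only the flags referenced in the excerpt are included.
import argparse

parser = argparse.ArgumentParser(description='generator / GAN training')
parser.add_argument('--num_resblock', type=int, default=16,
                    help='number of residual blocks in the generator')
parser.add_argument('--epochs', type=int, default=100)
parser.add_argument('--train_generator', action='store_true',
                    help='pre-train the generator on its own')
parser.add_argument('--train_gan', action='store_true',
                    help='adversarial training of generator and discriminator')
args = parser.parse_args()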
Example #4
    dataset, target = readTrainCSV(filecsv, fea_sel=0)

    # Split the data and cast to float arrays.
    import numpy as np
    from sklearn.model_selection import train_test_split
    X_train, X_test, y_train, y_test = train_test_split(
        dataset, target, test_size=0.2, random_state=0)
    # np.float was removed in NumPy 1.24; the builtin float dtype is equivalent.
    X_train = np.asarray(X_train).astype(float)
    X_test = np.asarray(X_test).astype(float)
    y_train = np.asarray(y_train).astype(float)
    y_test = np.asarray(y_test).astype(float)



    model_name = 'SVM'

    # Trainer wraps an sklearn classifier selected by model_name.
    classifier = Trainer(model_name, kernel='rbf', C=1, gamma='scale')

    # classifier.load_model('kNN_classifier.txt')

    # parameters = {'kernel': ('linear', 'rbf'), 'C': [1, 2]}
    # classifier.gridsearchCV(parameters)

    classifier.fit(X_train, y_train)
    classifier.predict(X_test)
    classifier.report(y_test)
    print(classifier.model)
    classifier.save_model('SVM_classifier.sav')
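The Trainer wrapper used in this example is not shown. Below is a minimal sketch of a compatible class built on scikit-learn's SVC, mirroring the methods called above (fit, predict, report, save_model); the implementation itself is an assumption.

# Hypothetical sketch of the classifier wrapper; not the original Trainer implementation.
import pickle
from sklearn.metrics import classification_report
from sklearn.svm import SVC

class Trainer:
    def __init__(self, model_name, **kwargs):
        self.model_name = model_name
        # Only the SVM path used in the example is sketched here.
        self.model = SVC(**kwargs)
        self.predictions = None

    def fit(self, X, y):
        self.model.fit(X, y)

    def predict(self, X):
        self.predictions = self.model.predict(X)
        return self.predictions

    def report(self, y_true):
        # Compare the most recent predictions against the ground-truth labels.
        print(classification_report(y_true, self.predictions))

    def save_model(self, path):
        with open(path, 'wb') as f:
            pickle.dump(self.model, f)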