def load_model(lr):

    model = MultiLayerPerceptron(103)
    loss_fnc = nn.MSELoss()
    optimizer = optim.SGD(model.parameters(), lr=lr)

    return model, loss_fnc, optimizer
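All of these loaders return the same (model, loss_fnc, optimizer) triple. A minimal sketch of how such a triple is typically consumed in a training step, assuming a hypothetical DataLoader named train_loader that yields (feats, labels) batches:

model, loss_fnc, optimizer = load_model(lr=0.1)

for feats, labels in train_loader:
    optimizer.zero_grad()                  # clear gradients from the previous step
    predictions = model(feats.float())     # forward pass
    loss = loss_fnc(predictions.squeeze(), labels.float())  # compare to targets
    loss.backward()                        # backpropagate
    optimizer.step()                       # apply the SGD update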
Example #2
def load_model(lr,hidden_layer,activation):

    ######

    # 3.4 YOUR CODE HERE
    model = MultiLayerPerceptron(feat_train.shape[1], hidden_layer, activation)
    loss_fnc = torch.nn.BCELoss()
    optimizer = torch.optim.SGD(model.parameters(), lr=lr)
    ######

    return model, loss_fnc, optimizer
Example #3
def load_model(lr):

    ######

    # 4.4 YOUR CODE HERE
    model = MultiLayerPerceptron(103)
    loss_fnc = torch.nn.MSELoss()
    optimizer = torch.optim.SGD(model.parameters(), lr=lr)
    ######

    return model, loss_fnc, optimizer
Example #4
def load_model(lr=0.1):

    ######

    model = MultiLayerPerceptron()
    loss_fnc = torch.nn.MSELoss()
    optimizer = torch.optim.SGD(model.parameters(), lr=lr)

    ######

    return model, loss_fnc, optimizer
Example #5
def load_model(learn_rate, activation):

    ######

    # 3.4 YOUR CODE HERE

    model = MultiLayerPerceptron(103, activation)
    loss_fnc = torch.nn.BCELoss()
    optimizer = torch.optim.SGD(model.parameters(), lr=learn_rate)

    ######

    return model, loss_fnc, optimizer
Example #6
def load_model(lr, hid_lay_size):

    ######

    # 3.4 YOUR CODE HERE
    model = MultiLayerPerceptron(
        final_features_data.shape[1],
        hid_lay_size)  # initialize with the number of input features
    loss_fnc = torch.nn.BCELoss()
    optimizer = torch.optim.SGD(model.parameters(), lr=lr)
    ######

    return model, loss_fnc, optimizer
Example #7
def load_data(batch_size, lr):
    train_dataset = AdultDataset(X_train, y_train)
    train_loader = DataLoader(train_dataset,
                              batch_size=batch_size,
                              num_workers=1,
                              shuffle=True)
    test_dataset = AdultDataset(X_test, y_test)
    test_loader = DataLoader(test_dataset,
                             batch_size=batch_size,
                             num_workers=1,
                             shuffle=False)

    loss_fnc = torch.nn.BCELoss()
    model = MultiLayerPerceptron(X_train.shape[1])
    optimizer = torch.optim.SGD(filter(lambda p: p.requires_grad,
                                       model.parameters()),
                                lr=lr)

    return train_loader, test_loader, model, loss_fnc, optimizer
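Note the filter(lambda p: p.requires_grad, ...) in this example: handing the optimizer only trainable parameters matters when part of the model is frozen. A short sketch of that pattern (the frozen layer name here is hypothetical):

model = MultiLayerPerceptron(X_train.shape[1])
for p in model.fc1.parameters():  # hypothetical first layer; freeze its weights
    p.requires_grad = False
optimizer = torch.optim.SGD(
    filter(lambda p: p.requires_grad, model.parameters()), lr=0.01)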
Example #8
batch_size = 1000
epochs = 1000
lr = 0.15

train_data, validation_data, train_label, valid_label = train_test_split(
    picks_onehot, radiant_win_onehot, test_size=0.2, random_state=0)
train_set = DotaDataset(train_data, train_label)
train_loader = DataLoader(train_set, batch_size=batch_size, shuffle=True)
val_set = DotaDataset(validation_data, valid_label)
val_loader = DataLoader(val_set, batch_size=batch_size, shuffle=False)

if torch.cuda.is_available():
    torch.set_default_tensor_type(torch.cuda.FloatTensor)

model = MultiLayerPerceptron(258)
model.cuda()
loss_fnc = torch.nn.MSELoss()
optimizer = torch.optim.SGD(model.parameters(), lr=lr)

training_accuracy = []
validation_accuracy = []
loss_record = []

for epoch in range(epochs):
    loss_epoch = 0
    x = 0
    for i, tbatch in enumerate(train_loader):
        feats, label = tbatch
        optimizer.zero_grad()
        prediction = model(feats.float())
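The listing is truncated here. A typical continuation of this inner batch step, following the standard PyTorch pattern (a sketch, not the original code):

        loss = loss_fnc(prediction, label.float())  # MSE between prediction and one-hot label
        loss.backward()                             # backpropagate
        optimizer.step()                            # SGD update
        loss_epoch += loss.item()                   # accumulate loss for this epoch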
Example #9
def load_model(lr, h_layer_size):
    model = MultiLayerPerceptron(np.shape(training_data)[1], h_layer_size)
    loss_fnc = torch.nn.MSELoss()
    optimizer = torch.optim.SGD(model.parameters(), lr=lr)

    return model, loss_fnc, optimizer
Example #10
def load_model(lr):
    # 3.4 YOUR CODE HERE
    loss_fnc = torch.nn.BCELoss()
    model = MultiLayerPerceptron(feat_train.shape[1])
    optimizer = torch.optim.Adam(model.parameters(), lr=lr)
    return model, loss_fnc, optimizer
Example #11
def load_model(lr, neuronNum=20, activationFunc=0):
    model = MultiLayerPerceptron(processedData.shape[1], neuronNum,
                                 activationFunc)
    loss_fnc = torch.nn.MSELoss()
    optimizer = torch.optim.SGD(model.parameters(), lr=lr)
    return model, loss_fnc, optimizer
Example #12
                              shuffle=True,
                              num_workers=cfg.n_workers)
    val_loader = DataLoader(val_set,
                            batch_size=cfg.batch_size,
                            shuffle=False,
                            num_workers=cfg.n_workers,
                            drop_last=False)
    print(f'\nNo. Train: {len(train_set):6d}')
    print(f'No. Val:   {len(val_set):6d}')
    print(f'No. Test:  {len(test_set):6d}')

    scaler = StandardScaler().fit(train_set[:][0])
    model = MultiLayerPerceptron(
        in_dim=train_set[0][0].shape[0],
        out_dim=train_set[0][1].shape[0],
        n_layers=cfg.n_layers,
        n_units=cfg.n_units,
        dropout=cfg.dropout,
        shift=torch.from_numpy(scaler.mean_.astype(np.float32)),
        scale=torch.from_numpy(scaler.scale_.astype(np.float32))).to(device)
    print('\nModel:\n')
    print(model)
    wandb.watch(model)

    loss = nn.BCEWithLogitsLoss()
    optimizer = torch.optim.Adam(model.parameters(),
                                 lr=cfg.lr,
                                 weight_decay=cfg.weight_decay)
    trainer = create_supervised_trainer(model, optimizer, loss, device)
    RunningAverage(output_transform=lambda x: x).attach(trainer, 'loss')

    trainer.add_event_handler(
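Unlike the simpler loaders above, this example folds the StandardScaler statistics into the model itself as shift and scale tensors, so inference needs no separate normalization step, and it pairs raw logits with BCEWithLogitsLoss, which fuses the sigmoid and binary cross-entropy into one numerically stable operation.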
Example #13
def load_model(lr):
    x = len(train_data[0])
    model = MultiLayerPerceptron(x)
    loss_fnc = torch.nn.MSELoss()
    optimizer = torch.optim.SGD(model.parameters(), lr=lr)
    return model, loss_fnc, optimizer