Example #1
import data
import driving_data
from keras.callbacks import ModelCheckpoint


def train():
    # get_model() is assumed to be defined or imported elsewhere in this project
    model = get_model()
    print("Loaded model")

    X, y = data.get_validation_dataset()
    print("Loaded validation dataset")
    print("Total of", len(y) * 4)
    model.summary()

    print("Training model")
    checkpoint_path = "weights.{epoch:02d}-{val_loss:.2f}.hdf5"
    checkpoint = ModelCheckpoint(checkpoint_path,
                                 verbose=1,
                                 save_best_only=True)
    model.fit_generator(driving_data.generate_arrays_from_file(),
                        validation_data=(X, y),
                        samples_per_epoch=len(y) * 4,
                        nb_epoch=150,
                        verbose=2,
                        callbacks=[checkpoint])
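For reference, samples_per_epoch and nb_epoch are Keras 1.x argument names, and fit_generator itself has since been deprecated in favour of model.fit, which accepts generators directly with steps_per_epoch (counted in batches) and epochs. A minimal sketch of the equivalent call, assuming the generator yields batches and that BATCH_SIZE is a placeholder for the actual batch size:

# Sketch only: Keras 2+ equivalent of the legacy fit_generator call above.
# BATCH_SIZE is a placeholder; steps_per_epoch counts batches, not samples.
steps = (len(y) * 4) // BATCH_SIZE
model.fit(driving_data.generate_arrays_from_file(),
          validation_data=(X, y),
          steps_per_epoch=steps,
          epochs=150,
          verbose=2,
          callbacks=[checkpoint])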
Example #2

from data import get_validation_dataset
import numpy as np
import torch as t
import matplotlib.pyplot as plt

train_dl = t.utils.data.DataLoader(get_validation_dataset(), batch_size=1)

# accumulate per-channel pixel count, sum and sum of squares over the dataset
a = 0.0
s = np.zeros(3)
s2 = np.zeros(3)
for x, _ in train_dl:
    x = x[0].cpu().numpy()              # (C, H, W) for a batch of size 1
    a += np.prod(x.shape[1:])           # pixels per channel in this sample
    s += np.sum(x, axis=(1, 2))         # per-channel sum
    s2 += np.sum(x ** 2, axis=(1, 2))   # per-channel sum of squares

# x is the last sample seen in the loop, in (C, H, W) layout
assert x.shape[0] == 3 and x.shape[1] == 300 and x.shape[2] == 300, "your samples are not correctly shaped"

for i in range(3):
    # per-channel mean should be close to 0 and second moment close to 1
    assert -a * 0.09 < s[i] < a * 0.09, "your normalization seems wrong"
    assert a * 0.91 < s2[i] < a * 1.09, "your normalization seems wrong"
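The same accumulators also yield the per-channel statistics directly; a minimal sketch (under the assumption that the checks above passed) of turning them into mean/std values for a torchvision Normalize transform:

# Sketch only: derive per-channel mean/std from the accumulators above.
import torchvision as tv

mean = s / a
std = np.sqrt(s2 / a - mean ** 2)   # Var[X] = E[X^2] - (E[X])^2
normalize = tv.transforms.Normalize(mean=mean.tolist(), std=std.tolist())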
Example #3

import torch as t
import torchvision as tv
from data import get_train_dataset, get_validation_dataset
from stopping import EarlyStoppingCallback
from trainer import Trainer
from matplotlib import pyplot as plt
import numpy as np
from model import resnet
import torch.utils.data

# set up data loading for the training and validation set using t.utils.data.DataLoader
# and the methods implemented in data.py

train_data = get_train_dataset()
valid_data = get_validation_dataset()

t_data = torch.utils.data.DataLoader(train_data, batch_size=32, shuffle=True, num_workers=2)
v_data = torch.utils.data.DataLoader(valid_data, batch_size=32, shuffle=True, num_workers=2)

# set up your model
my_model = resnet.Model()

# set up loss (pre-implemented loss functions can be found in t.nn); the pos_weight
# parameter of BCEWithLogitsLoss can ease convergence on imbalanced labels
# (see the sketch after this example)
loss = t.nn.BCEWithLogitsLoss()
# set up optimizer (see t.optim);
optim = t.optim.Adam(my_model.parameters(), lr=0.001)
# initialize the early stopping callback implemented in stopping.py and create an object of type Trainer
early_stopping_callback = EarlyStoppingCallback()

trainer = Trainer(my_model, loss, optim, t_data, v_data, cuda=True, early_stopping_cb=early_stopping_callback)

# go, go, go... call fit on trainer
# (arguments, e.g. an epoch budget, depend on the Trainer implementation in trainer.py)
trainer.fit()
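The scaffold comment above mentions the pos_weight argument of BCEWithLogitsLoss, which the code leaves at its default. A minimal sketch of how it could be supplied, assuming the positive/negative label counts of the training set are known (the counts below are placeholders, not taken from the data):

# Sketch only: pos_weight = (#negative samples) / (#positive samples) per label.
# n_pos and n_neg are placeholders; they would be computed from the training labels.
n_pos = t.tensor([1000.0])
n_neg = t.tensor([3000.0])
loss = t.nn.BCEWithLogitsLoss(pos_weight=n_neg / n_pos)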
Example #4

from data import get_train_dataset, get_validation_dataset
import model.GenderClassifierModel as gcm
from trainer import Trainer
from stopping import EarlyStoppingCB
import matplotlib.pyplot as plt
import numpy as np
import torch as t

epoch = 50  # number of training epochs

trainSet = get_train_dataset()
validSet = get_validation_dataset()

print("Training set size (augmented): ",trainSet.__len__(),"samples")
print("Validation set size: ",validSet.__len__(),"samples")

trainLoader = t.utils.data.DataLoader(dataset=trainSet, batch_size=128, shuffle=True)
validLoader = t.utils.data.DataLoader(dataset=validSet, batch_size=128, shuffle=True)

# set up your model
model = gcm.GCM()

# set up loss 
criteria = t.nn.MSELoss()

# set up optimizer (see t.optim); 
optimizer = t.optim.Adam(model.parameters(), lr=1e-3)

# set up scheduler
scheduler = t.optim.lr_scheduler.StepLR(optimizer, step_size=15, gamma=0.1)
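StepLR only changes the learning rate when it is stepped; with step_size=15 and gamma=0.1 the rate is multiplied by 0.1 every 15 epochs. A minimal sketch of the usual per-epoch loop (train_epoch and validate are hypothetical helpers standing in for the project's Trainer logic, not functions defined above):

# Sketch only: advancing the scheduler once per epoch.
# train_epoch() and validate() are hypothetical helpers, not defined in this snippet.
for e in range(epoch):
    train_epoch(model, trainLoader, criteria, optimizer)
    validate(model, validLoader, criteria)
    scheduler.step()   # learning rate drops by 10x every 15 epochs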