Example #1
def main():
    # prepare data
    train, valid = utility.prepare_data()

    # get model
    model = nvidia_model()
    model.summary()

    # generators for training and validation
    BATCH = 128
    train_gen = utility.next_train_batch(train, BATCH)
    valid_gen = utility.next_valid_batch(valid, BATCH)

    # training
    EPOCHS = 5
    TRAINS = 20480
    VALIDS = 4096
    model.compile(optimizer=Adam(1e-2), loss="mse")
    # Keras 1.x fit_generator signature; Keras 2 renamed these arguments
    # (steps_per_epoch counts batches rather than samples).
    history = model.fit_generator(train_gen,
                                  samples_per_epoch=TRAINS,
                                  nb_epoch=EPOCHS,
                                  validation_data=valid_gen,
                                  nb_val_samples=VALIDS,
                                  verbose=1)

    # save model, weights
    model.save_weights('model.h5')
    with open('model.json', 'w') as f:
        f.write(model.to_json())
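utility and nvidia_model() are project helpers that aren't shown in this listing. As a rough sketch only, assuming the NVIDIA end-to-end self-driving architecture and tf.keras (the real project may differ in input shape and layer sizes), nvidia_model() might look like this:

# Hypothetical sketch of the nvidia_model() helper used above (not part of the
# original listing); layer sizes follow the NVIDIA end-to-end driving paper.
from tensorflow.keras.models import Sequential
from tensorflow.keras.layers import Lambda, Conv2D, Flatten, Dense, Dropout

def nvidia_model(input_shape=(66, 200, 3)):
    model = Sequential([
        # normalize pixel values to [-0.5, 0.5]
        Lambda(lambda x: x / 255.0 - 0.5, input_shape=input_shape),
        Conv2D(24, (5, 5), strides=(2, 2), activation='relu'),
        Conv2D(36, (5, 5), strides=(2, 2), activation='relu'),
        Conv2D(48, (5, 5), strides=(2, 2), activation='relu'),
        Conv2D(64, (3, 3), activation='relu'),
        Conv2D(64, (3, 3), activation='relu'),
        Flatten(),
        Dense(100, activation='relu'),
        Dropout(0.5),
        Dense(50, activation='relu'),
        Dense(10, activation='relu'),
        Dense(1)  # single steering-angle output
    ])
    return model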
Example #2
def train():
    args = utility.get_inputs()
    image_dataset, dataloader = utility.prepare_data(args.datadir)
    model = build_model(args.arch, args.hidden_units)
    criterion = nn.NLLLoss()
    optimizer = optim.Adam(model.classifier.parameters(),
                           lr=args.learning_rate)

    model.to(args.gpu)

    with active_session():
        steps = 0
        running_loss = 0
        print_every = 40
        for e in range(args.epochs):

            for images, labels in dataloader['train']:
                model.train()
                steps += 1
                images, labels = images.to(args.gpu), labels.to(args.gpu)

                optimizer.zero_grad()
                output = model(images)
                loss = criterion(output, labels)
                loss.backward()
                optimizer.step()

                running_loss += loss.item()

                if steps % print_every == 0:
                    model.eval()

                    with torch.no_grad():
                        test_loss, accuracy = validation(
                            model, dataloader['test'], criterion, args.gpu)

                    print(
                        "Epoch: {}/{}.. ".format(e + 1, args.epochs),
                        "Training Loss: {:.3f}.. ".format(running_loss /
                                                          print_every),
                        "Validation Loss: {:.3f}.. ".format(
                            test_loss / len(dataloader['test'])),
                        "Test Accuracy: {:.3f}".format(
                            accuracy / len(dataloader['test'])))

                    running_loss = 0
                    model.train()

    model.optimizer_state_dict = optimizer.state_dict()
    model.class_to_idx = image_dataset['train'].class_to_idx
    return model, args.save_dir
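The validation() helper called above isn't shown in this listing. A minimal sketch, assuming the model ends in LogSoftmax to match the NLLLoss criterion and that it returns per-batch sums (the caller divides by len(dataloader['test'])), could be:

# Hypothetical sketch of the validation() helper used in train(); not part of
# the original listing.
import torch

def validation(model, loader, criterion, device):
    test_loss = 0
    accuracy = 0
    for images, labels in loader:
        images, labels = images.to(device), labels.to(device)
        output = model(images)
        test_loss += criterion(output, labels).item()
        # output holds log-probabilities, so exponentiate to get probabilities
        ps = torch.exp(output)
        equality = (labels == ps.max(dim=1)[1])
        accuracy += equality.float().mean().item()
    return test_loss, accuracy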
Example #3
                    type=int,
                    help='the number of hidden units')

args = parser.parse_args()

data_dir = args.datadirectory
save_dir = args.save_dir
arch = args.arch
hidden_units = args.hidden_units
epochs = args.epochs if args.epochs else 10
lr = args.learning_rate if args.learning_rate else 0.001
use_gpu = bool(args.gpu)

if hidden_units:
    model = network.Model(arch, hidden_units, pretrained=True)
else:
    model = network.Model(arch, pretrained=True)

train_data = prepare_data(data_dir + '/train/', True)
validation_data = prepare_data(data_dir + '/valid/', False)
train_dataloader = dataloader(train_data, True)
validation_dataloader = dataloader(validation_data, False)
trained_model = train(model, lr, train_dataloader, validation_dataloader,
                      epochs, use_gpu)

if save_dir:
    save_checkpoints(train_data, trained_model, save_dir)

else:
    print('Training finished, but no save directory was provided, so the model was not saved')
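The snippet opens in the middle of the parser definition. A hypothetical reconstruction of the missing argparse block, inferred from the attributes read above (the flag names and defaults are guesses, not the original code):

# Hypothetical reconstruction of the truncated argument parser; names and
# defaults are assumptions inferred from how args is used in this example.
import argparse

parser = argparse.ArgumentParser(description='Train an image classifier')
parser.add_argument('datadirectory', help='directory containing the dataset')
parser.add_argument('--save_dir', help='directory to save checkpoints in')
parser.add_argument('--arch', default='vgg16', help='pretrained architecture to use')
parser.add_argument('--learning_rate', type=float, help='optimizer learning rate')
parser.add_argument('--epochs', type=int, help='number of training epochs')
parser.add_argument('--gpu', action='store_true', help='train on the GPU if available')
parser.add_argument('--hidden_units',
                    type=int,
                    help='the number of hidden units')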
Example #4
# From: https://campus.datacamp.com
# Import the libraries
import utility as u

train, test = u.load_data()
train = u.prepare_data(train)

# Create train_two with the newly defined feature
train_two = train.copy()
train_two["Family_size"] = 1
train_two["Family_size"] = train_two["SibSp"] + train_two["Parch"] + 1
train_two["Family_size"] = train_two["Family_size"].fillna(1)
print(train_two)

# Create a new feature set and add the new feature
feature_list = ["Pclass", "Sex", "Age", "Fare", "SibSp", "Parch", "Family_size"]

# Control overfitting by setting max_depth to 10 and min_samples_split to 5
max_depth = 10
min_samples_split = 5
my_tree, features, target = u.train_model(train_two, feature_list, max_depth, min_samples_split)

# Look at the importance and score of the included features
print(my_tree.feature_importances_)
print(my_tree.score(features, target))

u.plot_result(feature_list, my_tree, features, target)
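utility.train_model() isn't shown in this listing. A plausible sketch, assuming scikit-learn's DecisionTreeClassifier and the Titanic "Survived" target column (both assumptions), might be:

# Hypothetical sketch of utility.train_model() as called above; not part of
# the original listing.
from sklearn.tree import DecisionTreeClassifier

def train_model(data, feature_list, max_depth, min_samples_split):
    features = data[feature_list].values
    target = data["Survived"].values
    tree = DecisionTreeClassifier(max_depth=max_depth,
                                  min_samples_split=min_samples_split,
                                  random_state=1)
    tree.fit(features, target)
    return tree, features, target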
Example #5
# From: https://campus.datacamp.com
# Import the libraries
import matplotlib.pyplot as plt
from sklearn.ensemble import RandomForestClassifier
#import sklearn.ensemble.RandomForestClassifier as RandomForestClassifier
import utility as u

train, test = u.load_data()
train = u.prepare_data(train)
test = u.prepare_data(test)

# We want the Pclass, Age, Sex, Fare, SibSp, Parch, and Embarked variables
feature_list = ["Pclass", "Age", "Sex", "Fare", "SibSp", "Parch", "Embarked"]
features_forest = train[feature_list].values
target = train["Survived"].values

# Building and fitting my_forest
forest = RandomForestClassifier(max_depth=10,
                                min_samples_split=2,
                                n_estimators=100,
                                random_state=1)
my_forest = forest.fit(features_forest, target)

# Print the score of the fitted random forest
print(my_forest.score(features_forest, target))

# Compute predictions on the test set features, then print the length of the prediction vector
test_features = test[feature_list].values
pred_forest = my_forest.predict(test_features)
print(len(pred_forest))
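Sex and Embarked are strings in the raw Titanic data and RandomForestClassifier needs numeric input, so u.prepare_data() presumably encodes them before the feature matrix is built. A sketch of what that step might do (the mappings and fill values are assumptions):

# Hypothetical sketch of the encoding assumed to happen inside
# utility.prepare_data(): map string columns to integers and fill missing values.
def prepare_data(df):
    df = df.copy()
    df["Sex"] = df["Sex"].map({"male": 0, "female": 1})
    df["Embarked"] = df["Embarked"].fillna("S").map({"S": 0, "C": 1, "Q": 2})
    df["Age"] = df["Age"].fillna(df["Age"].median())
    df["Fare"] = df["Fare"].fillna(df["Fare"].median())
    return df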
Example #6
    model.compile(optimizer=Adam(1e-2), loss="mse")
    history = model.fit_generator(train_gen,
                                  samples_per_epoch=TRAINS,
                                  nb_epoch=EPOCHS,
                                  validation_data=valid_gen,
                                  nb_val_samples=VALIDS,
                                  verbose=1)

    # save model, weights
    model.save_weights('model.h5')
    with open('model.json', 'w') as f:
        f.write(model.to_json())



if __name__ == '__main__':
    #main()
    train, valid = utility.prepare_data()
    #utility.test_image_shear(train)

    #utility.test_image_gamma(train)

    #utility.test_image_flip(train)

    #utility.test_augmented_image(train)

    utility.test_left_center_right(train)
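The utility.test_* helpers aren't shown in this listing. test_left_center_right() presumably exercises the usual left/center/right camera selection with a fixed steering correction; a sketch of that idea only (the field names and the 0.25 offset are assumptions):

# Hypothetical sketch of the left/center/right camera selection that
# utility.test_left_center_right() is assumed to exercise: pick a random
# camera and shift the steering angle by a fixed correction.
import random

def choose_left_center_right(row, correction=0.25):
    camera = random.choice(["left", "center", "right"])
    if camera == "left":
        return row["left"], row["steering"] + correction
    if camera == "right":
        return row["right"], row["steering"] - correction
    return row["center"], row["steering"]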