Example No. 1
# Define the optimizer
learning_rate = arg_dict['learning_rate']
optimizer = optim.Adam(model.classifier.parameters(), lr=learning_rate)

# Send the model to the device
model.to(device)

# Train the network
epochs = arg_dict['epochs']

if device == torch.device('cuda'):
    # Keep the workspace active
    with active_session():
        print('Begin training...')
        train(model, trainloader, validloader, criterion, optimizer, epochs,
              device)
else:
    print('Begin training...')
    train(model, trainloader, validloader, criterion, optimizer, epochs,
          device)

# Test the network
test(model, testloader, device)

# Save the checkpoint
checkpoint = {
    'arch': arch,
    'input_size': arch_input_dict[arch],
    'output_size': 102,
    'hidden_layers': hidden_units,
    'state_dict': model.state_dict()
}
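The train helper called above is not shown in this example. As a rough sketch only, assuming a standard PyTorch training loop with per-epoch validation (the structure and print format are illustrative, not the original implementation), it might look something like this:

import torch


# Hypothetical sketch of the train() helper used above, not the original implementation.
def train(model, trainloader, validloader, criterion, optimizer, epochs, device):
    for epoch in range(epochs):
        # One pass over the training data
        model.train()
        running_loss = 0
        for images, labels in trainloader:
            images, labels = images.to(device), labels.to(device)
            optimizer.zero_grad()
            loss = criterion(model(images), labels)
            loss.backward()
            optimizer.step()
            running_loss += loss.item()

        # Evaluate on the validation set after each epoch
        model.eval()
        valid_loss = 0
        with torch.no_grad():
            for images, labels in validloader:
                images, labels = images.to(device), labels.to(device)
                valid_loss += criterion(model(images), labels).item()

        print(f"Epoch {epoch + 1}/{epochs}.. "
              f"Train loss: {running_loss / len(trainloader):.3f}.. "
              f"Validation loss: {valid_loss / len(validloader):.3f}")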
Example No. 2
from helper_functions import read_data
from model_functions import train, feature_importances, confusion_matrix_evaluate, roc_curve_evaluate
from sklearn.model_selection import train_test_split


if __name__ == '__main__':
    # Read in data
    X, y = read_data()

    # Split into a training and test set
    X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.1)

    # Train a model
    model, vectorizer = train(X_train, y_train, X_test, y_test)

    # Plot feature importances
    feature_importances(model, vectorizer)

    # Plot confusion matrix
    confusion_matrix_evaluate(model, X_test, y_test)

    # Train a OneVsRest classifier and plot the ROC curve
    roc_curve_evaluate(X_train, y_train, X_test, y_test, kfold=False)
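The helpers imported from model_functions are not shown here. As a rough sketch only, assuming the input is raw text vectorized with TF-IDF and fitted with a logistic-regression baseline (the names and model choice are illustrative, not the actual implementation), the train helper might look something like this:

# Hypothetical sketch of model_functions.train (the real implementation is not shown on this page).
from sklearn.feature_extraction.text import TfidfVectorizer
from sklearn.linear_model import LogisticRegression
from sklearn.metrics import accuracy_score


def train(X_train, y_train, X_test, y_test):
    # Vectorize the raw text, fitting the vocabulary on the training split only
    vectorizer = TfidfVectorizer()
    X_train_vec = vectorizer.fit_transform(X_train)
    X_test_vec = vectorizer.transform(X_test)

    # Fit a simple baseline classifier
    model = LogisticRegression(max_iter=1000)
    model.fit(X_train_vec, y_train)

    # Report held-out accuracy so the caller can sanity-check the fit
    print("Test accuracy:", accuracy_score(y_test, model.predict(X_test_vec)))
    return model, vectorizer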
Example No. 3
import json


def start_train(args):
    # Load the training options from the JSON config file
    with open(args.config_path) as f:
        options = json.load(f)
    train(options)
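A short, hypothetical usage sketch for start_train, assuming the config path arrives via an argparse flag and the JSON file holds plain training options (the flag name and the example keys are illustrative):

# Hypothetical usage sketch: wire start_train() to a --config_path command-line flag.
import argparse

if __name__ == '__main__':
    parser = argparse.ArgumentParser()
    parser.add_argument('--config_path', type=str, default='config.json',
                        help='Path to a JSON file with training options')
    args = parser.parse_args()
    # config.json might contain e.g. {"epochs": 10, "learning_rate": 0.001}
    start_train(args)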
Example No. 4
parser.add_argument("data_dir", help="The path of data directory to train the network on",type=str)
parser.add_argument("--save_dir", help="Optionally set the directory to save checkpoints",type=str)
parser.add_argument("--arch", help="Choose model architecture from torchvision.models",choices=arch_supported, type=str)
parser.add_argument("--learning_rate", help="Optionally set a learning rate for the model",type=float)
parser.add_argument("--hidden_units", help="Optionally set an amount of hidden units in the architecture",type=int)
parser.add_argument("--epochs", help="Optionally set the amount of epochs to train",type=int)
parser.add_argument("--gpu", help="Optionally use GPU for training", action="store_true")

args = vars(parser.parse_args())
args_dict = dict(filter(lambda elem: (elem[1] != None) and (elem[1] != False), args.items()))

# Define transforms for training, validation and test data, given the required model input image size and normalization
transforms = utility.data_transforms(224,[0.485, 0.456, 0.406],[0.229, 0.224, 0.225])

# Load the datasets with ImageFolder, using the parameter transforms
image_datasets, n_classes = utility.load_datasets(args_dict['data_dir'],transforms)

dataloaders = utility.create_dataloaders(image_datasets,batch = 64)

kwargs_model =  {k:v for (k,v) in args_dict.items() if k in ['arch','hidden_units','gpu']}

model = model_functions.build(n_classes, **kwargs_model)

model.class_to_idx = image_datasets['train'].class_to_idx

kwargs_train =  {k:v for (k,v) in args_dict.items() if k in ['epochs','gpu','learning_rate']}
model_trained = model_functions.train(model,dataloaders,print_every = 5,**kwargs_train)

kwargs_save =  {k:v for (k,v) in args_dict.items() if k in ['save_dir']}
utility.save_checkpoint(model_trained,**kwargs_save)
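utility.save_checkpoint is likewise not shown on this page. A minimal sketch, assuming it stores the state dict and class mapping and defaults to the current directory (the keys, default path and filename are assumptions):

# Hypothetical sketch of utility.save_checkpoint (the real helper is not shown here).
import os

import torch


def save_checkpoint(model, save_dir='.'):
    checkpoint = {
        'class_to_idx': model.class_to_idx,
        'state_dict': model.state_dict(),
    }
    torch.save(checkpoint, os.path.join(save_dir, 'checkpoint.pth'))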
Example No. 5
from model_functions import train, save_model

# Train a model with the default settings and persist it to disk
model = train()

save_model(model)
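save_model is another helper that is not shown. A minimal sketch, assuming a scikit-learn style estimator serialized with joblib (the function body and default filename are illustrative):

# Hypothetical sketch of model_functions.save_model (not the original implementation).
import joblib


def save_model(model, path='model.joblib'):
    # Persist the fitted estimator so it can be reloaded later with joblib.load(path)
    joblib.dump(model, path)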