Example 1
parser.add_argument('--hidden_units',
                    type=int,
                    dest="hidden_units",
                    action="store",
                    default=120,
                    help="state the units for the first hidden layer (default 120)")

pa = parser.parse_args()
data_path = pa.data_dir
path = pa.save_dir
lr = pa.learning_rate
structure = pa.arch
dropout = pa.dropout
hidden_layer1 = pa.hidden_units
power = pa.gpu
epochs = pa.epochs

#load the data - invoke the data_load method from helper
trainloader, v_loader, testloader = hp.data_load(data_path)

#create the model
model, optimizer, criterion = hp.nn_arch(structure, dropout, hidden_layer1, lr,
                                         power)

#train the neural network
hp.train_network(model, optimizer, criterion, epochs, 20, trainloader, power)

# save the trained network checkpoint
hp.save_checkpoint(path, structure, hidden_layer1, dropout, lr)

print("The Model is trained")
Example 2
                dest="learning_rate",
                default=0.001)
ap.add_argument('--dropout', type=float, dest="dropout", default=0.5)

pa = ap.parse_args()

data_dir = pa.data_dir
path = pa.save_dir
structure = pa.arch
power = pa.gpu
epochs = pa.epochs
hidden_layer = pa.hidden_units
lr = pa.learning_rate
dropout = pa.dropout

# load the 3 dataloaders (takes data_dir as argument)
trainloader, validloader, testloader, class_to_idx = helper.load_data(data_dir)

# set up model structure (takes structure, dropout, hidden_layer, lr, power)
model, optimizer, criterion = helper.nn_setup(structure, dropout, hidden_layer,
                                              lr, power)

# train and validate the network
helper.train_network(model, optimizer, criterion, epochs, 20, trainloader,
                     validloader, power)

helper.save_checkpoint(model, path, structure, hidden_layer, dropout, lr,
                       class_to_idx)

print("------------Model Trained!------------")
Example 3
args = parser.parse_args()

# split and preprocess the data into training, validation, and test loaders
data_dir = args.data_dir
train_data, valid_data, test_data, trainloader, validloader, testloader = utility.preprocess_img(data_dir)

#create model using --arch argument with vgg19 as default
model = utility.torch_model(args.arch)
for param in model.parameters():
    param.requires_grad = False
# parameters are now frozen so we do not backpropagate through them

#calculate input size into the network classifier
input_size = utility.get_input_size(model, args.arch)

model.classifier = helper.NeuralNetwork(input_size, args.output_size, [args.hidden_units], drop_p=0.35)

#define the loss function and the optimization parameters
criterion = nn.NLLLoss()  # NLLLoss because the network's output activation is LogSoftmax
optimizer = optim.Adam(model.classifier.parameters(), lr=args.learning_rate)
scheduler = optim.lr_scheduler.StepLR(optimizer, step_size=4, gamma=0.1)

#train model
helper.train_network(model, trainloader, validloader, args.epochs, 40, criterion, optimizer, scheduler, args.gpu)

#test model
test_accuracy, test_loss = helper.loss_accuracy(model, testloader, criterion, args.gpu)
print("\n ---\n Test Accuracy: {:.2f} %".format(test_accuracy*100), "Test Loss: {}".format(test_loss))

#save network to checkpoint
utility.save_checkpoint(model, train_data, optimizer, args.save_dir, args.arch)
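helper.NeuralNetwork is called above with an input size, an output size, a list of hidden-layer sizes, and a dropout probability, and the NLLLoss choice implies a LogSoftmax output. A sketch of a classifier module with that interface (the exact layer arrangement is an assumption):

import torch.nn.functional as F
from torch import nn

class NeuralNetwork(nn.Module):
    def __init__(self, input_size, output_size, hidden_layers, drop_p=0.35):
        super().__init__()
        # first hidden layer, then arbitrary hidden-to-hidden layers
        self.hidden_layers = nn.ModuleList([nn.Linear(input_size, hidden_layers[0])])
        self.hidden_layers.extend([nn.Linear(h1, h2)
                                   for h1, h2 in zip(hidden_layers[:-1], hidden_layers[1:])])
        self.output = nn.Linear(hidden_layers[-1], output_size)
        self.dropout = nn.Dropout(p=drop_p)

    def forward(self, x):
        for layer in self.hidden_layers:
            x = self.dropout(F.relu(layer(x)))
        # log-probabilities, to pair with nn.NLLLoss
        return F.log_softmax(self.output(x), dim=1)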
Example 4
                    dest="fc2",
                    action="store",
                    default=1024,
                    help="state the units for fc2")

pa = parser.parse_args()
data_path = pa.data_dir
filepath = pa.save_dir
learn_r = pa.learn_r
architecture = pa.architecture
dropout = pa.dropout
fc2 = pa.fc2
gpu_cpu = pa.gpu_cpu
epoch_num = pa.epoch_num

# load the data - load_data() from help.py
trainloader, validationloader, testloader = hp.load_data(data_path)

# build model
model, optimizer, criterion = hp.nn_architecture(architecture, dropout, fc2,
                                                 learn_r)

# train model
hp.train_network(model, criterion, optimizer, trainloader, validationloader,
                 epoch_num, 20, gpu_cpu)

# checkpoint the model
hp.save_checkpoint(filepath, architecture, dropout, learn_r, fc2, epoch_num)

print("model has been successfully trained")
Example 5
import argparse
import helper

arg = argparse.ArgumentParser(description='Train.py')
# Command line arguments

arg.add_argument('data_dir', nargs='?', action="store", default="./flowers/")
arg.add_argument('--gpu', dest="gpu", action="store", default='device')
arg.add_argument('--save_dir', dest="save_dir", action="store", default="./checkpoint.pth")
arg.add_argument('--learning_rate', dest="learning_rate", action="store", type=float, default=0.003)
arg.add_argument('--dropout', dest="dropout", action="store", type=float, default=0.2)
arg.add_argument('--epochs', dest="epochs", action="store", type=int, default=4)
arg.add_argument('--arc', dest="arc", action="store", default="resnet50", type=str)
arg.add_argument('--hidden_units', type=int, dest="hidden_units", action="store", default=512)

pa_arg = arg.parse_args()

from_loc = pa_arg.data_dir
checkpoint = pa_arg.save_dir
lr = pa_arg.learning_rate
structure = pa_arg.arc
dropout = pa_arg.dropout
dev = pa_arg.gpu
epochs = pa_arg.epochs
hidden_units = pa_arg.hidden_units


trainloader, testloader, validationloader, train_data = helper.load_data(from_loc)
model, criterion, optimizer = helper.netwk_setup(structure, dropout, lr, hidden_units)

helper.train_network(model, criterion, optimizer, trainloader, testloader, epochs)
helper.validation_data_set(model, validationloader, criterion)

helper.save_checkpoint(model, structure, hidden_units, dropout, lr, epochs, optimizer, checkpoint, from_loc)
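Each script ends by delegating to a save_checkpoint helper. As a sketch, the Example 5 variant might persist something like the following, assuming a class-to-index mapping has been attached to the model beforehand (the dictionary keys are placeholders):

import torch

def save_checkpoint(model, structure, hidden_units, dropout, lr, epochs,
                    optimizer, save_path='./checkpoint.pth', data_dir='./flowers/'):
    # everything needed to rebuild the model later for inference
    checkpoint = {'arch': structure,
                  'hidden_units': hidden_units,
                  'dropout': dropout,
                  'learning_rate': lr,
                  'epochs': epochs,
                  'state_dict': model.state_dict(),
                  'class_to_idx': getattr(model, 'class_to_idx', None),
                  'optimizer_state': optimizer.state_dict(),
                  'data_dir': data_dir}
    torch.save(checkpoint, save_path)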