Example #1
                         str(checkpoint_path))

        # Check if we should load a model:
        if load_model and checkpoint_path.exists():
            print("Loading old weights from:", str(checkpoint_path))
            model.load_weights(str(checkpoint_path))

        if args.cpu:
            with tf.device('/cpu:0'):
                if not args.eval:  # Training
                    model = train_model(model=model,
                                        target_size=target_size,
                                        batch_size=args.batch_size,
                                        list_of_categories=categories,
                                        train_epoch=args.epochs,
                                        steps_per_epoch=args.steps_per_epoch,
                                        validation_split=validation_split,
                                        train_folder=train_folder,
                                        checkpoint_path=checkpoint_path,
                                        tensorboard_path=tensorboard_path,
                                        as_gray=as_gray)
                # Evaluate in both modes (after training, or eval-only)
                test_model(model=model,
                           target_size=target_size,
                           batch_size=args.batch_size,
                           list_of_categories=categories,
                           as_gray=as_gray,
                           test_folder=test_folder,
                           result_folder=result_folder,
                           image_type=image_type)
        else:
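The branch above is cut off. For context, here is a minimal sketch of how a checkpoint_path like this one is typically produced on the training side with a Keras ModelCheckpoint callback; this is an assumption about the project's train_model, not its actual code:

import tensorflow as tf

# Hedged sketch: write weights to checkpoint_path during training, matching
# the model.load_weights(str(checkpoint_path)) call in the snippet above.
checkpoint_cb = tf.keras.callbacks.ModelCheckpoint(
    filepath=str(checkpoint_path),  # checkpoint_path as defined by the snippet
    save_weights_only=True,         # pairs with load_weights()
    save_best_only=True)            # keep only the best validation epoch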
Example #2
# import pre-trained model
model, classifier_dict = model_functions.generate_model(
    args["arch"], args["hidden_units"])

# enable gradients globally, then freeze the pre-trained feature extractor
torch.set_grad_enabled(True)
for param in model.features.parameters():
    param.requires_grad = False

# set loss function and optimizer
criterion = nn.NLLLoss()
optimizer = optim.SGD(model.classifier.parameters(), lr=args["learning_rate"])

# transfer the model to the device and train
model = model.to(device)
model = model_functions.train_model(model, args["epochs"], device, optimizer,
                                    criterion, train_loader, valid_loader)

# evaluate the model on the test dataset
model_functions.test_model(model, device, optimizer, criterion, test_loader)

# build the checkpoint dictionary
checkpoint = {
    'epochs': args["epochs"],
    'arch': args["arch"],
    'classifier_dict': classifier_dict,
    'cumulative_epochs': args["epochs"],
    'state_dict': model.state_dict(),
    'class_to_idx': train_data.class_to_idx,
    'optimizer_state_dict': optimizer.state_dict()
}
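A minimal sketch of persisting and later restoring this checkpoint dict, assuming a save path of 'checkpoint.pth' and that the same architecture is rebuilt before loading:

# Save the checkpoint dict built above
torch.save(checkpoint, 'checkpoint.pth')

# Later: rebuild the same model and optimizer, then restore their state
restored = torch.load('checkpoint.pth')
model.load_state_dict(restored['state_dict'])
optimizer.load_state_dict(restored['optimizer_state_dict'])
model.class_to_idx = restored['class_to_idx']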
Example #3
def train(args):
    print("Loading the data...")
    train_dir = args.data_dir + '/train'
    valid_dir = args.data_dir + '/valid'
    test_dir = args.data_dir + '/test'
    batch_size = 32
    train_transforms = transforms.Compose([
        transforms.RandomRotation(30),
        transforms.RandomResizedCrop(224),
        transforms.RandomHorizontalFlip(),
        transforms.ToTensor(),
        transforms.Normalize([0.485, 0.456, 0.406], [0.229, 0.224, 0.225])
    ])

    val_transforms = transforms.Compose([
        transforms.Resize(256),
        transforms.CenterCrop(224),
        transforms.ToTensor(),
        transforms.Normalize([0.485, 0.456, 0.406], [0.229, 0.224, 0.225])
    ])
    test_transforms = transforms.Compose([
        transforms.Resize(256),
        transforms.CenterCrop(224),
        transforms.ToTensor(),
        transforms.Normalize([0.485, 0.456, 0.406], [0.229, 0.224, 0.225])
    ])
    # TODO: Load the datasets with ImageFolder
    train_dataset = datasets.ImageFolder(train_dir, transform=train_transforms)
    val_dataset = datasets.ImageFolder(valid_dir, transform=val_transforms)
    test_dataset = datasets.ImageFolder(test_dir, transform=test_transforms)

    # TODO: Using the image datasets and the transforms, define the dataloaders
    trainloader = torch.utils.data.DataLoader(train_dataset,
                                              batch_size=batch_size,
                                              shuffle=True)
    valloader = torch.utils.data.DataLoader(val_dataset,
                                            batch_size=batch_size,
                                            shuffle=False)
    testloader = torch.utils.data.DataLoader(test_dataset,
                                             batch_size=batch_size,
                                             shuffle=False)
    print("Loading  data is finished")
    print("preparing the model...")
    if args.arch == 'alexnet':
        model = models.alexnet(pretrained=True)
        for param in model.parameters():
            param.requires_grad = False
        num_features = model.classifier[1].in_features
        new_network = nn.Sequential(
            OrderedDict([
                ('dropout1', nn.Dropout(p=0.5)),
                ('fc1', nn.Linear(num_features, args.hidden_units)),
                ('relu', nn.ReLU()),
                ('dropout2', nn.Dropout(p=0.5)),
                ('fc2', nn.Linear(args.hidden_units, args.hidden_units)),
                ('relu2', nn.ReLU()),
                ('dropout3', nn.Dropout(p=0.5)),
                ('fc3', nn.Linear(args.hidden_units, 102)),
                ('output', nn.LogSoftmax(dim=1)),
            ]))
        model.classifier = new_network
    elif args.arch == 'vgg16':
        model = models.vgg16(pretrained=True)
        for param in model.parameters():
            param.requires_grad = False
        num_features = model.classifier[0].in_features
        new_network = nn.Sequential(
            OrderedDict([
                ('dropout1', nn.Dropout(p=0.5)),
                ('fc1', nn.Linear(num_features, args.hidden_units)),
                ('relu', nn.ReLU()),
                ('dropout2', nn.Dropout(p=0.5)),
                ('fc2', nn.Linear(args.hidden_units, args.hidden_units)),
                ('relu2', nn.ReLU()),
                ('dropout3', nn.Dropout(p=0.5)),
                ('fc3', nn.Linear(args.hidden_units, 102)),
                ('output', nn.LogSoftmax(dim=1)),
            ]))
        model.classifier = new_network
    optimizer = optim.SGD(model.classifier.parameters(), lr=args.lr)
    criterion = nn.NLLLoss()
    device = "cpu"
    if args.gpu:
        device = utilize.ProcessType()
    model.to(device)
    model.class_to_idx = train_dataset.class_to_idx
    print("Preparing is finished")
    print("Start training...")
    model = fm.train_model(model, optimizer, criterion, trainloader, valloader,
                           args.epochs, device, args.save_dir)
    model.epoch = args.epochs
    print("Training End...")
    fm.saveCheckPoint(model,
                      "_" + args.arch + "_model_epochs" + str(args.epochs),
                      args.save_dir)
    print("checkpoint is saved in path \ " + args.save_dir)
Example #4
                    type=str,
                    action='store',
                    default='checkpoint.pth',
                    help='Select save path, default "checkpoint.pth"')

parse = parser.parse_args()

architecture = parse.architecture
data_dir = parse.data_dir
hidden_layer = parse.hidden_layer
alpha = parse.learning_rate
processor_unit = parse.gpu
epochs = parse.epochs
chkt_path = parse.save_dir
###################################################################
train_loader, validation_loader, test_loader = model_functions.load_data(data_dir)

model, classifier, criterion, optimizer = model_functions.model_build(
    architecture, hidden_layer)

model_functions.validation_pass(model, validation_loader, criterion)

model_functions.train_model(model, criterion, optimizer, alpha, epochs,
                            processor_unit)

model_functions.test_accuracy(model, test_loader)

model_functions.save_model(chkt_path, architecture)

print("Model training completed...")
Example #5
current_directory = os.getcwd()
final_directory = os.path.join(current_directory, folder)
dim_red_names = list(Reduced_datasets.keys())
Selected_Nets = [6]
#Run experiments to vary network architecture and dimensionality reduction
for name in dim_red_names:
    data_folder = final_directory + name
    if not os.path.exists(data_folder):
        os.makedirs(data_folder)
    # Visualize the training split with t-SNE (a sketch of such a helper
    # follows this loop)
    if visualize:
        TSNE_Visual(Reduced_datasets[name]['train'].files,
                    Reduced_datasets[name]['train'].targets, data_folder, name)
    for network in Selected_Nets:
        #Train model using 3-fold CV and save visual results
        Network_folder = data_folder + '/' + 'Network ' + str(network) + '/'
        if not os.path.exists(Network_folder):
            os.makedirs(Network_folder)
        num_features = Reduced_datasets[name]['train'].files.shape[1]
        model = Networks[network](in_features=num_features)
        optimizer = optim.Adam(model.parameters(), lr=0.001)
        criterion = nn.CrossEntropyLoss()
        # Compute the parameter count once per network and save it
        if name == dim_red_names[0]:
            # Count the trainable parameters
            num_params = sum(p.numel() for p in model.parameters()
                             if p.requires_grad)
            # Write the count to a text file
            with open(Network_folder + 'NumParams.txt', "w") as output:
                output.write(str(num_params))
        train_model(model, Reduced_datasets[name], batch_size, criterion, optimizer,
                    num_epochs=num_epochs, k=fold_num, data_folder=Network_folder)
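TSNE_Visual is project-specific; a minimal sketch of such a helper (name and signature are assumptions) using scikit-learn and matplotlib:

import os
import matplotlib.pyplot as plt
from sklearn.manifold import TSNE

def tsne_visual(features, targets, out_folder, name):
    # Project the (N, D) feature matrix to 2-D and save a scatter plot
    embedded = TSNE(n_components=2).fit_transform(features)
    plt.figure()
    plt.scatter(embedded[:, 0], embedded[:, 1], c=targets, s=5, cmap='tab10')
    plt.title('t-SNE of ' + name + ' (train)')
    plt.savefig(os.path.join(out_folder, name + '_tsne.png'))
    plt.close()

Example #6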
# Rebuild the final layers to match our problem, using an OrderedDict so each
# layer keeps a readable name.
classifier = nn.Sequential(OrderedDict([
    ('fc1', nn.Linear(input_size, arguments.num_hidden)),  # input features -> hidden layer
    ('relu', nn.ReLU()),                                   # non-linearity after fc1
    ('drop', nn.Dropout(p=0.5)),                           # 50% dropout to curb overfitting
    ('fc2', nn.Linear(arguments.num_hidden, 102)),         # hidden layer -> 102 classes
    ('output', nn.LogSoftmax(dim=1))                       # log-probabilities per class, for NLLLoss
]))

# Ensure we overwrite the model classifier with the newly configured ordered dictionary
model.classifier = classifier

# Setting up the model input arguments (hyperparameters)

# Model, criterion, optimizer, scheduler, num_epochs=25, device='cuda'

# Model is initialised in the block above: pretrained vgg16 with the classifier adjusted

# The criterion is the loss function used to evaluate the model fit;
# NLLLoss is the standard pairing with a LogSoftmax final layer
criteria = nn.NLLLoss()

# Optimize all classifier parameters with Adam at the configured learning rate
optimizer = torch.optim.Adam(model.classifier.parameters(), arguments.learning_rate)

# Learning-rate scheduling: decay the LR by gamma_lrsched every
# step_size_sched epochs
sched = lr_scheduler.StepLR(optimizer, step_size=arguments.step_size_sched,
                            gamma=arguments.gamma_lrsched)

model_functions.train_model(model, criteria, optimizer, sched, arguments.epochs,
                            dataset_sizes, data_loaders, arguments.device)
model_functions.test_acc(model, data_loaders, arguments.device)
model_functions.save_checkpoint(model, optimizer, image_datasets, arguments.arch,
                                arguments.epochs, arguments.learning_rate,
                                input_size, arguments.num_hidden)
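For reference, a hedged sketch of how a StepLR scheduler is typically advanced inside a train_model like the one called above (the project's implementation may differ; data_loaders['train'] and the model already being on arguments.device are assumptions): one sched.step() per epoch, so the learning rate decays by gamma every step_size epochs.

for epoch in range(arguments.epochs):
    for inputs, labels in data_loaders['train']:
        inputs = inputs.to(arguments.device)
        labels = labels.to(arguments.device)
        optimizer.zero_grad()
        loss = criteria(model(inputs), labels)
        loss.backward()
        optimizer.step()
    sched.step()  # epoch-level learning-rate decay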
Example #7
                         horizontal_flip=True,
                         vertical_flip=True,
                         fill_mode='nearest')
    print(model.summary())
    if args.cpu:
        with tf.device('/cpu:0'):
            # all ops below run in the /cpu:0 device scope
            if not args.eval:  # Training
                model = train_model(model=model,
                                    target_size=target_size,
                                    batch_size=args.batch_size,
                                    list_of_categories=categories,
                                    train_epoch=args.epochs,
                                    steps_per_epoch=args.steps_per_epoch,
                                    validation_split=validation_split,
                                    train_folder=train_folder,
                                    val_folder=val_folder,
                                    checkpoint_path=checkpoint_path,
                                    tensorboard_path=tensorboard_path,
                                    as_gray=as_gray,
                                    data_gen_args=data_gen_args,
                                    early_stop_number=args.early_stop,
                                    image_type=image_type)
                print("Done training")

            # Restore the best checkpoint weights, then evaluate
            # (runs in both modes: after training, or eval-only)
            model.load_weights(str(checkpoint_path))
            test_model(model=model,
                       target_size=target_size,
                       batch_size=args.batch_size,
                       list_of_categories=categories,
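The dict truncated at the top of this snippet uses Keras ImageDataGenerator keywords; a hedged sketch of how such a data_gen_args dict is commonly consumed (an assumption about the project's train_model):

from tensorflow.keras.preprocessing.image import ImageDataGenerator

data_gen_args = dict(horizontal_flip=True,
                     vertical_flip=True,
                     fill_mode='nearest')
datagen = ImageDataGenerator(**data_gen_args)
# datagen.flow_from_directory(train_folder, ...) would then feed training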
Example #8
parser.add_argument("--gpu", help="Use GPU for training", action="store_true")
args = parser.parse_args()

image_datasets, data_loaders, _ = modfunc.transform_load(args.data_directory)
model, criterion, optimizer = modfunc.create_model(
    arch=args.arch,
    dropout=args.dropout,
    hidden_units=args.hidden_units,
    learning_rate=args.learning_rate)

if args.gpu:
    device = 'cuda'
else:
    device = 'cpu'

modfunc.train_model(image_datasets, data_loaders, model, criterion, optimizer,
                    args.epochs, device)
modfunc.model_test(image_datasets, data_loaders, model, 'test', criterion,
                   device)

if args.save_dir:
    othfunc.save_model(image_datasets,
                       args.arch,
                       model,
                       args.dropout,
                       args.hidden_units,
                       args.learning_rate,
                       args.epochs,
                       optimizer,
                       checkpoint=args.save_dir + '/' + args.checkpoint)
    print("Model saved as", args.save_dir + '/' + args.checkpoint)