Example #1
def main(config):
    # Use GPU!
    device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")
    print("Device:  " + str(device))

    # Simple transform (normalization such as transforms.Normalize((0.1307,), (0.3081,)) could be added)
    transform = transforms.Compose([transforms.ToTensor()])

    # Get MNIST Datasets
    save = config.data_loc  # e.g. './data/MNIST'
    trainloader, testloader, _ = LoadMNIST(save, transforms_=transform, batch_size=config.batch_size)

    epochs = config.epochs
    print("Epochs: " + str(epochs))

    model = AutoEncoder()

    # opt = optim.Adam(model.parameters(), lr=0.0005, betas=(0.9, 0.95))
    opt = optim.SGD(model.parameters(), lr=config.lr)

    # Loss function: config.loss is expected to name the loss as a string, e.g. "nn.MSELoss"
    cost = eval(config.loss)()         # instantiate, e.g. nn.MSELoss()
    loss_name = type(cost).__name__    # e.g. "MSELoss"
    # cost = nn.BCELoss()  # reduce=False

    decode_out = config.save_images + loss_name + '/'  # e.g. './save/Decoder_Images/MSELoss/'

    # Create a trainer
    trainer = Trainer(model, opt, cost, name="Default Autoencoder",
                      device=device, decode_out=decode_out)

    # Run training
    trainer.Train(trainloader, epochs)
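
The snippet assumes a `config` object exposing the fields it reads; a minimal, hypothetical argparse driver consistent with those fields (defaults are illustrative) might look like:

import argparse

if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    parser.add_argument("--data_loc", default="./data/MNIST")
    parser.add_argument("--batch_size", type=int, default=64)
    parser.add_argument("--epochs", type=int, default=10)
    parser.add_argument("--lr", type=float, default=0.01)
    parser.add_argument("--loss", default="nn.MSELoss")
    parser.add_argument("--save_images", default="./save/Decoder_Images/")
    main(parser.parse_args())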
Example #2
    def train(self, triplet=True):
        # generate dataset for PyTorch
        from data.dataset import TruncatedInputFromMFB, ToTensor, SpeakerDataset
        from torchvision import transforms
        from torch.utils.data import DataLoader
        import torch
        transform = transforms.Compose([TruncatedInputFromMFB(), ToTensor()])
        if Config.MODEL_TYPE == 'cnn3d':
            from data.dataset3d import SpeakerDataset3D
            initial_dataset = SpeakerDataset(transform=transform)
            train_dataset = SpeakerDataset3D(initial_dataset)
        else:
            train_dataset = SpeakerDataset(transform=transform)

        # instantiate a model
        if Config.MODEL_TYPE == 'rescnn':
            from models.rescnn import ResNet
            model_ = ResNet(layers=Config.RESCNN_LAYERS,
                            num_classes=Config.NUM_CLASSES)
        elif Config.MODEL_TYPE == 'gru':
            from models.gru import GRU
            model_ = GRU(layers=Config.GRU_LAYERS,
                         num_classes=Config.NUM_CLASSES)
        elif Config.MODEL_TYPE == 'cnn3d':
            from models.cnn3d import CNN3D
            model_ = CNN3D(num_classes=Config.NUM_CLASSES)
        else:
            raise ValueError("Unknown Config.MODEL_TYPE: %s" % Config.MODEL_TYPE)

        from utils.train import Trainer
        model_ = model_.cuda()
        epoch = Config.SOFTMAX_TRAINING_EPOCH
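        # NOTE: the optimizer and DataLoader are re-created on every iteration
        # of the loop below, which resets Adam's moment estimates each epoch;
        # hoisting them above the loop would preserve optimizer state.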
        for i in range(epoch):
            optimizer = torch.optim.Adam(model_.parameters())
            train_loader = DataLoader(train_dataset,
                                      batch_size=Config.PRETRAIN_BATCH_SIZE,
                                      shuffle=True)
            Trainer.train(train_loader, model_, optimizer, i)

        if triplet:
            from copy import deepcopy
            model_tri = deepcopy(model_)
            model_tri = model_tri.cuda()
            epoch_ = Config.TRIPLET_TRAINING_EPOCH
            for i in range(epoch_):
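                # rebuild SGD with a linearly decayed lr: TRIPLET_LR - i * TRIPLET_LR_DECAY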
                optimizer_ = torch.optim.SGD(model_tri.parameters(),
                                             lr=Config.TRIPLET_LR -
                                             i * Config.TRIPLET_LR_DECAY,
                                             momentum=Config.TRIPLET_MOMENTUM)
                train_loader = DataLoader(
                    train_dataset,
                    batch_size=Config.FINETUNE_BATCH_SIZE,
                    shuffle=True)
                Trainer.train_tri(train_loader,
                                  model_tri,
                                  optimizer_,
                                  i,
                                  semi_hard=True,
                                  triplet_margin=Config.TRIPLET_MARGIN)
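
The linear decay above is implemented by rebuilding the SGD optimizer every epoch; a minimal sketch of the same schedule using torch.optim.lr_scheduler.LambdaLR (reusing Config and model_tri from the snippet) keeps a single optimizer instead:

optimizer_ = torch.optim.SGD(model_tri.parameters(), lr=Config.TRIPLET_LR,
                             momentum=Config.TRIPLET_MOMENTUM)
# LambdaLR multiplies the initial lr by the factor returned for each epoch:
# lr_i = TRIPLET_LR * (1 - i * TRIPLET_LR_DECAY / TRIPLET_LR)
#      = TRIPLET_LR - i * TRIPLET_LR_DECAY, matching the loop above
scheduler = torch.optim.lr_scheduler.LambdaLR(
    optimizer_,
    lr_lambda=lambda i: 1.0 - i * Config.TRIPLET_LR_DECAY / Config.TRIPLET_LR)
for i in range(Config.TRIPLET_TRAINING_EPOCH):
    # ... one epoch of triplet training with optimizer_ ...
    scheduler.step()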
#################################
# Custom dynamic plotting class #
#################################
plotter = AccuracyPlot()


#################################
# Train the model, plot accuracy
#################################
# Optimizer
opt = optim.Adam(model.parameters(), lr=0.0005, betas=(0.9, 0.95))

# Loss function
cost = nn.CrossEntropyLoss()

# Create a trainer
trainer = Trainer(model, opt, cost, name="Default CNN", device=device)

# Add test accuracy plotting
plotter.new_line("Default CNN")
trainer.SetEpochCallback(plotter.EpochCallback)

# Run training
trainer.Train(trainloader, epochs, testloader=testloader)


#################################
# Create the assignment Resnet (part a)
#################################
def GetDefaultResNet():
    # (the source listing truncates here; minimal completion using the two
    #  arguments shown)
    resnet = ResNet(in_features=[32, 32, 3],
                    num_class=10)
    return resnet
Example #4
File: main.py Project: jvsguerra/MO433
import torch
import numpy as np

from utils.train import Trainer
from utils.plots import plot_loss, plot_bpd

if __name__ == "__main__":
    # Seed
    seed = 10
    np.random.seed(seed)
    torch.manual_seed(seed)
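    # (torch.cuda.manual_seed_all(seed) would also be needed for
    #  reproducibility on GPU runs)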

    # Quick demo run: one epoch on the 'dog' subset
    print('Running our network for one epoch ...')
    model = Trainer(lr=1e-3, epochs=1, device='cpu', subset=True, label='dog')
    print('[==> Visualize training images ...')
    model.visualize(fname='output/dogs_cifar_trainset.png')
    print('[==> Building model ...')
    model.build()
    print('[==> Fitting model ...')
    model.fit()

    # Pre-trained model
    # 1) Full CIFAR-10
    model = Trainer(lr=1e-3, epochs=30, device='cpu', subset=False)
    print('[==> Visualize training images ...')
    model.visualize(fname='output/cifar_trainset.png')
    model.build()
    # Load pre-trained model
    print('[==> Loading pre-trained model')
    model.load_model('input/pre_trained/cifar/net_final.model')
model = GetCNN()

# Display model specifications
summary(model, (3, 32, 32))

# Send model to GPU
model.to(device)

# Specify optimizer
opt = optim.Adam(model.parameters(), lr=0.0005, betas=(0.9, 0.95))

# Specify loss function
cost = nn.CrossEntropyLoss()

# Train the model
trainer = Trainer(device=device, name="Basic_CNN")
epochs = 5
trainer.Train(model,
              trainloader,
              testloader,
              cost=cost,
              opt=opt,
              epochs=epochs)

# Load best saved model for inference
model_loaded = GetCNN()

# Specify location of saved model
PATH = "./save/Basic_CNN-best-model/model.pt"
checkpoint = torch.load(PATH)
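
Applying the checkpoint is not shown; assuming the checkpoint dict stores the weights under a 'model' key (an 'optimizer' key appears in Example #8 below), a sketch:

model_loaded.load_state_dict(checkpoint['model'])  # key name is an assumption
model_loaded.to(device)
model_loaded.eval()  # inference mode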
Example #6
    batch_size : Batch size of test and train data
"""
config = {
    "l1": tune.sample_from(
        lambda _: 2**np.random.randint(2, 9)),  # eg. 4, 8, 16 .. 512
    "l2": tune.sample_from(
        lambda _: 2**np.random.randint(2, 9)),  # eg. 4, 8, 16 .. 512
    "lr": tune.loguniform(1e-4,
                          1e-1),  # Sampling from log uniform distribution
    "decay": tune.sample_from(
        lambda _: 10**np.random.randint(-7, -3)),  # eg. 1e-7, 1e-6, .. 1e-3
    "batch_size": tune.choice([32, 64, 128, 256])
}

# calling trainer
trainer = Trainer(device=device)
"""ASHA (Asynchronous Successive Halving Algorithm) scheduler
        max_t              : Maximum number of units per trail (can be time or epochs)
        grace_period       : Stop trials after specific number of unit if model is not performing well (can be time or epochs)
        reduction_factor   : Set halving rate
"""
scheduler = ASHAScheduler(max_t=max_num_epochs,
                          grace_period=4,
                          reduction_factor=4)
"""Population based training scheduler
    time_attr             : Can be time or epochs
    metric                : Objective of training (loss or accuracy)
    perturbation_interval : Perturbation occur after specified unit (can be time or epochs)
    hyperparam_mutations  : Hyperparameters to mutate
"""
# (the source listing truncates the call; completed below with representative
#  values for the parameters documented above)
scheduler = PopulationBasedTraining(
    time_attr="training_iteration",
    metric="loss",
    mode="min",
    perturbation_interval=4,
    hyperparam_mutations={
        "lr": tune.loguniform(1e-4, 1e-1),
        "batch_size": [32, 64, 128, 256],
    })
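
Neither scheduler is wired into a run here; with Ray Tune's classic API, the search space and a scheduler are handed to tune.run. A minimal sketch, where train_fn is a hypothetical trainable reporting a "loss" metric:

from ray import tune

# train_fn(config) is assumed to call tune.report(loss=...) each epoch
analysis = tune.run(train_fn,
                    config=config,
                    scheduler=scheduler,  # the PBT (or ASHA) scheduler above
                    num_samples=20)
print("Best config:", analysis.get_best_config(metric="loss", mode="min"))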
Example #7
# Load the pre-trained model
#################################

model_ft = models.resnet18(pretrained=True)
num_ftrs = model_ft.fc.in_features
model_ft.fc = nn.Sequential(
    nn.Dropout(0.5),
    nn.Linear(num_ftrs, 10)
)
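
# A quick sanity check (input size assumed to be the standard 224x224) that
# the replaced head emits 10 logits:
model_ft.eval()  # deterministic dropout/batch-norm for the check
with torch.no_grad():
    out = model_ft(torch.randn(1, 3, 224, 224))
print(out.shape)  # expected: torch.Size([1, 10])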


model_ft = model_ft.to(device)

# Loss function
cost = nn.CrossEntropyLoss()

# Optimizer
lr = 0.0005
# opt = optim.SGD(model_ft.parameters(), lr=lr, momentum=0.9)
opt = torch.optim.Adam(model_ft.parameters(), lr=lr, betas=(0.9, 0.95),
                       weight_decay=1e-4)  # weight_decay adds L2 regularization

# Create a trainer
trainer = Trainer(model_ft, opt, cost, name="Transfer-learning", lr=lr,
                  use_lr_schedule=True, device=device)

# Run training
epochs = 25
trainer.Train(trainloader, epochs, testloader=testloader)
# trainer.Train(trainloader, epochs) # check train error

print('done')
Example #8
lr = 0.0005

# opt.load_state_dict(checkpoint['optimizer'])
opt = optim.Adam(model_loaded.parameters(), lr=lr, betas=(0.9, 0.999))
opt.param_groups[0]['lr'] = 0.005  # override the initial lr (0.0005) with a 10x larger value

# opt = optim.SGD(model.parameters(), lr=0.0005, momentum=0.9)

# Loss function
cost = nn.CrossEntropyLoss()

# Create a trainer
trainer = Trainer(model_loaded,
                  opt,
                  cost,
                  name="ResNet2-accuracy_above_85",
                  lr=lr,
                  use_lr_schedule=True,
                  device=device)

# Add test accuracy plotting
plotter.new_line("Default ResNet")
trainer.SetEpochCallback(plotter.EpochCallback)

# Run training
epochs = 50
trainer.Train(trainloader, epochs, testloader=testloader)
# trainer.Train(trainloader, epochs) # check train error

plotter.show()
def main():
    batch_size = args.batch_size
    augment = args.augment
    dataloaders = train_val_dataloaders(TRAIN_DIR, DEV_DIR, augment, batch_size)

    train_dataloader = dataloaders["train"]
    val_dataloader = dataloaders["val"]

    show = args.show_batch

    if show:
        # Let's have a look at the first batch
        print("Show batch from train dataloader: ")
        show_batch(train_dataloader, class_names)

        print("Show batch from val dataloader: ")
        show_batch(val_dataloader, class_names)

    # Model - pretrained ResNet18, trained on ImageNet
    model = models.resnet18(pretrained=True)

    # Disable grad for all conv layers
    for param in model.parameters():
        param.requires_grad = False
    print(
        "Input features of the final FC layer we are about to replace: ",
        model.fc.in_features,
    )
    # Replace the FC layer; this dataset has 5 classes
    model.fc = torch.nn.Linear(model.fc.in_features, 5)

    # Put model on GPU
    device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")
    model = model.to(device)

    # Loss function - multi-class cross-entropy
    loss = torch.nn.CrossEntropyLoss()

    learning_rate = args.learning_rate
    # Optimization method - Adam
    optimizer = torch.optim.Adam(model.parameters(), amsgrad=True, lr=learning_rate)

    number_epochs = args.num_epochs
    # Training
    model_trainer = Trainer(
        model,
        train_dataloader,
        val_dataloader,
        loss,
        optimizer,
        device,
        num_epochs=number_epochs,
    )

    print("Begin training: ")
    model_trainer.train()

    # Draw losses and save the plot
    model_trainer.draw_losses("ResNet18", "Adam")

    # Save weights of the model
    model_trainer.save_weights("ResNet18", "Adam")

    # Save trained model
    model_trainer.save_model("ResNet18", "Adam")