Example #1
        out = self.classifier(out)
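        # Sanity-check that the classifier produced one score per class for every sample.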
        expected_shape = (batch_size, self.num_classes)
        assert out.shape == expected_shape, \
            f"Expected output of forward pass to be: {expected_shape}, but got: {out.shape}"
        return out


if __name__ == "__main__":
    # Set the random generator seed (parameters, shuffling etc).
    # You can try to change this and check if you still get the same result!
    utils.set_seed(0)
    epochs = 10
    batch_size = 64
    learning_rate = 5e-2
    early_stop_count = 4
    dataloaders = load_cifar10(batch_size)
    # Network 1 (the worst-performing one):
    """print("Network 1:")
    model1 = Net1(image_channels=3, num_classes=10)
    trainer1 = Trainer(
        batch_size,
        learning_rate,
        early_stop_count,
        epochs,
        model1,
        dataloaders
    )
    trainer1.train()
    create_plots(trainer1, "task3_net1")
    _, train_acc = compute_loss_and_accuracy(
        trainer1.dataloader_train, trainer1.model, trainer1.loss_criterion
Example #2
import torch.optim as optim
import torch.nn as nn

import dataloaders  # local module; provides the load_cifar10 used below
from transforms import model9_resnet_train_transforms, model9_resnet_test_transforms
from utils import plot_samples
from train import train_loop
from test import test_loop

# ResNet18, show_model_summary and DEVICE are assumed to be defined in local
# project modules that are not part of this snippet.

#model = Model7()
model = ResNet18()
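# Print a layer-by-layer summary of the network for 3x32x32 (CIFAR-10) inputs.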
show_model_summary(model.to(DEVICE), (3, 32, 32))

# Constants (TODO: move these into a config).
epochs = 50
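# Use a large batch size on GPU and a small one for CPU-only runs.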
cuda_batch_size = 128
cpu_batch_size = 4
num_workers = 4

# Train and test get their own transforms below; the old shared transform is kept for reference.
#transforms = model7_transforms()
(train_loader, test_loader, classes) = \
    dataloaders.load_cifar10(model9_resnet_train_transforms(), model9_resnet_test_transforms(),
                             cuda_batch_size, cpu_batch_size, num_workers)

plot_samples(train_loader)

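# CrossEntropyLoss expects raw (unnormalized) logits, so the model needs no softmax output layer.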
criterion = nn.CrossEntropyLoss()
optimizer = optim.SGD(model.parameters(), lr=0.009, momentum=0.9)

train_loop(epochs, train_loader, model, DEVICE, optimizer, criterion)
test_loop(test_loader, model, DEVICE, criterion)
Example #3
from torchvision import utils, models
from dataloaders import load_cifar10
from torch import nn
from utils import to_cuda

data_train, data_val, data_test = load_cifar10(1, 0)

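# Transfer learning: start from an ImageNet-pretrained ResNet18, freeze everything,
# then unfreeze only the last residual stage (layer4) and the new 10-class head.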
model = models.resnet18(pretrained=True)
# ResNet18's final feature dimension is 512 (512 * 4 is only correct for the
# Bottleneck-based ResNet50/101/152).
model.fc = nn.Linear(512, 10)
for param in model.parameters():
    param.requires_grad = False
for param in model.fc.parameters():
    param.requires_grad = True
for param in model.layer4.parameters():
    param.requires_grad = True
model.eval()  # use the pretrained batch-norm running statistics for visualization

# Rebuild the convolutional part of the network; bn1, relu and maxpool are
# included so the stem matches ResNet18's actual forward pass.
feature_extractor = nn.Sequential(model.conv1, model.bn1, model.relu, model.maxpool,
                                  model.layer1, model.layer2, model.layer3,
                                  model.layer4)
print(feature_extractor)

for batch_it, (x, y) in enumerate(data_train):
    # Upsample the 32x32 CIFAR-10 images by 8x so the activations of the last
    # block remain spatially meaningful.
    x = nn.functional.interpolate(x, scale_factor=8)
    print(x.size())
    activations = feature_extractor(x)
    print(activations.size())
    utils.save_image(x, 'task_3_f/image.png', nrow=256)
    for i in range(10):
        utils.save_image(activations[0][i],
                         'task_3_f/filter' + str(i) + '.png',
                         nrow=164)
    print(activations)
    break  # one batch is enough; later iterations would overwrite the saved images
Example #4
                                                    eps=1e-08,
                                                    weight_decay=0.01,
                                                    amsgrad=False)
    else:
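        # No optimizer is configured for other settings, so the function implicitly returns None.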
        return


if __name__ == "__main__":
    # Set the random generator seed (parameters, shuffling etc).
    # You can try to change this and check if you still get the same result!
    utils.set_seed(0)
    epochs = 10
    batch_size = 32
    learning_rate = 5e-2
    early_stop_count = 4
    dataloaders = load_cifar10(batch_size)

    # Model for architecture 2 (one of the best-performing models).
    model2 = load_model_settings(2)
    trainer2 = Trainer(batch_size, learning_rate, early_stop_count, epochs,
                       model2, dataloaders)

    load_train_settings(trainer2, 2)
    trainer2.train()

    # Final metrics:
    print("training loss/accuracy:", trainer2.trainloss, trainer2.trainacc)
    print("validation loss/accuracy:", trainer2.valloss, trainer2.valacc)
    print("test loss/accuracy:", trainer2.testloss, trainer2.testacc)

    create_plots(trainer2, "task3_arch2_plot")
Example #5
    utils.plot_loss(trainer1.TRAIN_LOSS, label="Training loss")
    utils.plot_loss(trainer1.VALIDATION_LOSS, label="Validation loss")
    utils.plot_loss(trainer1.TEST_LOSS, label="Testing loss")
    plt.legend()
    plt.subplot(2, 1, 2)
    # The accuracy
    plt.title("Accuracy")
    utils.plot_loss(trainer1.VALIDATION_ACC, label="Validation Accuracy")
    utils.plot_loss(trainer1.TEST_ACC, label="Testing Accuracy")
    plt.legend()
    plt.savefig(plot_path.joinpath(f"{name}_plot.png"))
    plt.show()
    return


if __name__ == "__main__":
    epochs = 10
    batch_size = 32
    learning_rate = 5e-4
    early_stop_count = 4
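    # task4a=True: course-specific flag, presumably selecting the task-4a data pipeline.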
    dataloaders = load_cifar10(batch_size, task4a=True)
    model = Model()
    trainer1 = Trainer(batch_size,
                       learning_rate,
                       early_stop_count,
                       epochs,
                       model,
                       dataloaders,
                       task4a=True)
    trainer1.train()
    create_plots(trainer1, "task4a")
Example #6
    
    def forward(self, x):
        x = self.model(x)
        return x

if __name__ == "__main__":
    utils.set_seed(0)
    epochs = 10
    batch_size = 32

    learning_rate = 5e-4
    early_stop_count = 4
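    # 224x224 inputs normalized with the ImageNet mean/std, presumably to match a pretrained backbone.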
    dataloaders = load_cifar10(
        batch_size,
        augment=False,
        augment_extend=False,
        size=224,
        mean=(0.485, 0.456, 0.406),
        std=(0.229, 0.224, 0.225))

    model = Model()

    trainer = Trainer(
        batch_size,
        learning_rate,
        early_stop_count,
        epochs,
        model,
        dataloaders,
        l2_reg=0,
        lr_schedule_gamma=0.0,