Example #1
def part2():
    print('Running part 2...')
    # load the concatenated datasets
    concat_train, concat_test = load_concat()

    # load the negative testset
    _, neg_test, _, _ = load_negative()

    # define lenet
    leNet = LeNet5("mnist")

    # define the loss and optimizer
    loss_fn = nn.CrossEntropyLoss()
    opt = torch.optim.Adam(params=leNet.parameters(), lr=0.001, weight_decay=0)

    # train on concatenated dataset for 10 epochs
    train(concat_train, concat_test, leNet, opt, loss_fn, 10, "leNet_concat")

    # print out final testing results
    concat_acc = test(concat_test, leNet, loss_fn)
    neg_acc = test(neg_test, leNet, loss_fn)
    print(
        f'The accuracy of concatenated training on concatenated testset is: {concat_acc}'
    )
    print(
        f'The accuracy of concatenated training on negative dataset alone is: {neg_acc}'
    )
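The train and test helpers used here are defined elsewhere; judging from the call sites, test returns an accuracy for a data loader. A minimal sketch under that assumption (the body is illustrative, not the original implementation):

import torch

def test(loader, model, loss_fn):
    # hypothetical evaluator matching test(loader, model, loss_fn) above;
    # loss_fn is kept only for signature compatibility
    model.eval()
    correct, total = 0, 0
    with torch.no_grad():
        for X, y in loader:
            correct += (model(X).argmax(1) == y).sum().item()
            total += len(y)
    model.train()
    return correct / total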
Example #2
    def setup_model(self, resume=False):
        print("Loading Model")
        self.model = LeNet5()
        self.optimizer = torch.optim.SGD(self.model.parameters(), lr=0.005)
        self.scheduler = lr_scheduler.MultiStepLR(self.optimizer,
                                                  milestones=[2, 5, 8, 12],
                                                  gamma=0.1)

        if resume:
            print("Resuming from saved model")
            self.load_saved_model()
        if torch.cuda.is_available():
            print("Using GPU")
            self.model.cuda()
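For context, MultiStepLR multiplies the learning rate by gamma = 0.1 each time a milestone epoch (2, 5, 8, 12) is passed, and must be stepped once per epoch. A sketch of a driving loop, where Trainer, num_epochs, and train_one_epoch are all hypothetical names:

trainer = Trainer()                # assumed owner of setup_model()
trainer.setup_model(resume=False)
for epoch in range(num_epochs):
    train_one_epoch(trainer.model, trainer.optimizer)  # hypothetical helper
    trainer.scheduler.step()       # decay lr after each milestone epoch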
Example #3
def build_model(device):
    model = LeNet5(orig_c3=cfg.MODEL.ORIG_C3,
                   orig_subsample=cfg.MODEL.ORIG_SUBSAMPLE,
                   activation=cfg.MODEL.ACTIVATION,
                   dropout=cfg.MODEL.DROPOUT,
                   use_bn=cfg.MODEL.BATCHNORM)

    model.to(device)
    # Check model dependencies using backprop.
    model_checker(model, train_dataset, device)

    # Load pretrained model if specified.
    if cfg.TRAIN.PRETRAINED_PATH != '':
        load_checkpoint(model, cfg.TRAIN.PRETRAINED_PATH)

    return model
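load_checkpoint is referenced above but not shown. A minimal sketch consistent with the call site, assuming the checkpoint file holds either a bare state_dict or a dict with a 'state_dict' key:

import torch

def load_checkpoint(model, path):
    # hypothetical helper; the 'state_dict' key is an assumption
    state = torch.load(path, map_location='cpu')
    # accept either a wrapped checkpoint or a bare state_dict
    model.load_state_dict(state.get('state_dict', state))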
Example #4
def run():
    generator = Generator().to(device)

    #teacher = torch.load(opt.teacher_dir + 'teacher').to(device)
    teacher = LeNet5()
    teacher.load_state_dict(torch.load("cache/models/lenet_mnist.pt"))
    teacher.eval()
    teacher.to(device)
    criterion = torch.nn.CrossEntropyLoss().to(device)

    teacher = nn.DataParallel(teacher)
    generator = nn.DataParallel(generator)

    # Optimizers
    optimizer_G = torch.optim.Adam(generator.parameters(), lr=opt.lr_G)

    # ----------
    #  Training
    # ----------
    for epoch in range(opt.n_epochs):

        for i in range(120):
            generator.train()
            z = torch.randn(opt.batch_size, opt.latent_dim, device=device)
            optimizer_G.zero_grad()
            gen_imgs = generator(z)
            outputs_T, features_T = teacher(gen_imgs, out_feature=True)
            pred = outputs_T.detach().max(1)[1]  # teacher predictions as pseudo-labels
            loss_activation = -features_T.abs().mean()
            loss_one_hot = criterion(outputs_T, pred)
            softmax_o_T = torch.nn.functional.softmax(outputs_T,
                                                      dim=1).mean(dim=0)
            loss_information_entropy = (softmax_o_T *
                                        torch.log(softmax_o_T)).sum()
            loss = loss_one_hot * opt.oh + loss_information_entropy * opt.ie + loss_activation * opt.a
            loss.backward()
            optimizer_G.step()
            if i == 1:
                print(
                    "[Epoch %d/%d] [loss_oh: %f] [loss_ie: %f] [loss_a: %f]" %
                    (epoch, opt.n_epochs, loss_one_hot.item(),
                     loss_information_entropy.item(), loss_activation.item()))

    torch.save(generator.state_dict(), opt.output_dir + "generator_only.pt")
    print("generator saved at ", opt.output_dir + "generator_only.pt")
Example #5
def part1():
    print('Running part 1...')
    # load in the positive and negative data
    train_loader_pos, test_loader_pos, _, _ = load_positive()
    _, test_loader_neg, _, _ = load_negative()

    # define the model
    leNet = LeNet5("mnist")

    # define the loss and optimizer
    loss_fn = nn.CrossEntropyLoss()
    opt = torch.optim.Adam(params=leNet.parameters(), lr=0.001, weight_decay=0)

    # train the leNet for 10 epochs
    train(train_loader_pos, test_loader_pos, leNet, opt, loss_fn, 10,
          "leNet5_part1_pos")

    # get the test accuracy on the negative labels
    neg_acc = test(test_loader_neg, leNet, loss_fn)
    pos_acc = test(test_loader_pos, leNet, loss_fn)
    print(f'The normal test accuracy is: {pos_acc}')
    print(f'The accuracy on negative data is: {neg_acc}')
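load_positive and load_negative are defined elsewhere; "negative" presumably means intensity-inverted digits. A sketch under that assumption (the inversion transform and the four-tuple return are guesses matching the unpacking above):

from torch.utils.data import DataLoader
from torchvision import datasets, transforms

def load_negative(batch_size=64):
    # hypothetical: MNIST with pixel intensities inverted
    tf = transforms.Compose([transforms.ToTensor(),
                             transforms.Lambda(lambda x: 1.0 - x)])
    train = datasets.MNIST('./data', train=True, download=True, transform=tf)
    test = datasets.MNIST('./data', train=False, download=True, transform=tf)
    return (DataLoader(train, batch_size=batch_size, shuffle=True),
            DataLoader(test, batch_size=batch_size), train, test)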
Example #6
def evaluate_pictures(dataset, net=None):
    if net is None:
        net = LeNet5(NUM_CLASSES).to(device)
        net.load_state_dict(torch.load(SAVE_PATH, map_location=device))

    figure = plt.figure(figsize=FIG_SIZE, dpi=400)
    net.eval()

    for idx in range(1, NUM_ROWS*NUM_COLUMNS + 1):
        plt.subplot(NUM_ROWS, NUM_COLUMNS, idx)
        plt.axis('off')
        plt.imshow(dataset.data[idx], cmap='gray_r')

        with torch.no_grad():
            inp = dataset[idx][0].unsqueeze(0)
            inp = inp.to(device)
            _, probs = net(inp)

        title = f'{torch.argmax(probs)} ({torch.max(probs * 100):.0f}%)'
        plt.title(title, fontsize=7)

    figure.suptitle('LeNet 5 - Predictions')
    plt.savefig('predictions.png', format=FIG_FORMAT)
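A hypothetical invocation; the dataset and transform choices here are assumptions, while FIG_SIZE, SAVE_PATH, and the other constants come from the original module:

from torchvision import datasets, transforms

test_set = datasets.MNIST(root='./data', train=False, download=True,
                          transform=transforms.ToTensor())
evaluate_pictures(test_set)  # net=None, so weights are loaded from SAVE_PATH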
Example #7
# disable tensorflow debugging messages
import os
os.environ['TF_CPP_MIN_LOG_LEVEL'] = '3'

from lenet5 import LeNet5

if __name__ == "__main__":
    model = LeNet5(input_shape=(32, 32, 1), classes=10)
    model.summary()
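The lenet5 module itself is not shown. A minimal sketch of what its LeNet5 factory might look like in tf.keras, with layer choices assumed from the classic architecture:

from tensorflow import keras
from tensorflow.keras import layers

def LeNet5(input_shape=(32, 32, 1), classes=10):
    # hypothetical implementation: two conv/pool stages, then 120-84-classes
    inputs = keras.Input(shape=input_shape)
    x = layers.Conv2D(6, 5, activation='tanh')(inputs)
    x = layers.AveragePooling2D(2)(x)
    x = layers.Conv2D(16, 5, activation='tanh')(x)
    x = layers.AveragePooling2D(2)(x)
    x = layers.Flatten()(x)
    x = layers.Dense(120, activation='tanh')(x)
    x = layers.Dense(84, activation='tanh')(x)
    outputs = layers.Dense(classes, activation='softmax')(x)
    return keras.Model(inputs, outputs, name='LeNet5')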
Example #8
trainingSet = datasets.MNIST(root='/home/MNIST', train=True, transform=transform)
trainingLoader = DataLoader(trainingSet,
                            batch_size=batch_size,
                            sampler=sampler.SubsetRandomSampler(
                                range(n_train)),
                            num_workers=num_workers)

valLoader = DataLoader(trainingSet,
                       batch_size=batch_size,
                       sampler=sampler.SubsetRandomSampler(
                           range(n_train, 60000)),
                       num_workers=num_workers)
testSet = datasets.MNIST(root='/home/MNIST', train=False, transform=transform)
testLoader = DataLoader(dataset=testSet, batch_size=64, shuffle=False)

# build LeNet5 and train it on the CPU
device = torch.device('cpu')

model = LeNet5(10).to(device)
lossFunc = torch.nn.CrossEntropyLoss()
opt = torch.optim.Adam(model.parameters(), lr=0.001)

# begin to train
num_epoch = 5
for epoch in range(num_epoch):
    running_loss = 0
    model.train()
    for step, data in enumerate(trainingLoader, start=0):
        images, labels = data
        images = images.to(device)
        labels = labels.to(device)
        pred, _ = model(images)
        loss = lossFunc(pred, labels)
        opt.zero_grad()
        loss.backward()
        opt.step()
        running_loss += loss.item()
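    # Hypothetical end-of-epoch validation pass (not in the original snippet),
    # reusing the valLoader defined above over samples n_train..59999.
    model.eval()
    correct = 0
    with torch.no_grad():
        for images, labels in valLoader:
            pred, _ = model(images.to(device))
            correct += (pred.argmax(1) == labels.to(device)).sum().item()
    model.train()
    print('epoch {}: val accuracy {:.4f}'.format(
        epoch + 1, correct / (60000 - n_train)))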
Example #9
    steps = p_steps_loss["step"]
    loss_value = p_steps_loss["loss_value"]
    steps = list(map(int, steps))
    loss_value = list(map(float, loss_value))
    plt.plot(steps, loss_value, color="red")
    plt.xlabel("Steps")
    plt.ylabel("Loss_value")
    plt.title("Change chart of model loss value")
    plt.show()


if __name__ == "__main__":
    lr = 0.01
    momentum = 0.9

    # create the network
    network = LeNet5()

    # define the optimizer
    net_opt = nn.Momentum(network.trainable_params(), lr, momentum)

    # define the loss function
    net_loss = SoftmaxCrossEntropyWithLogits(sparse=True, reduction='mean')

    epoch_size = 5
    mnist_path = "./MNIST/"
    model_path = "./model/ckpt/mindspore_quick_start/"

    repeat_size = 1
    ds_train = create_dataset(os.path.join(mnist_path, "train"), 32,
                              repeat_size)
    ds_eval = create_dataset(os.path.join(mnist_path, "test"), 32)
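    # A sketch (not part of the original) of how these pieces are typically
    # wired together; the Model/Accuracy/LossMonitor usage below follows the
    # standard MindSpore quick-start pattern and is an assumption here.
    from mindspore import Model
    from mindspore.nn.metrics import Accuracy
    from mindspore.train.callback import LossMonitor

    model = Model(network, net_loss, net_opt, metrics={"Accuracy": Accuracy()})
    model.train(epoch_size, ds_train, callbacks=[LossMonitor()],
                dataset_sink_mode=False)
    print("Accuracy:", model.eval(ds_eval, dataset_sink_mode=False))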
Example #10
def get_whole_model():
    net = LeNet5(NUM_CLASSES).to(device)
    criterion = torch.nn.CrossEntropyLoss()
    optimizer = torch.optim.Adam(net.parameters(), lr=LEARNING_RATE)

    return net, criterion, optimizer
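A hypothetical usage, assuming a train_loader and the module-level device used above; the tuple check guards against models that return (logits, probs) as in the evaluation example:

net, criterion, optimizer = get_whole_model()
images, labels = next(iter(train_loader))        # train_loader is assumed
outputs = net(images.to(device))
logits = outputs[0] if isinstance(outputs, tuple) else outputs
loss = criterion(logits, labels.to(device))
optimizer.zero_grad()
loss.backward()
optimizer.step()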
Example #11
            global_step += 1
            print("Loss: {0}".format(loss.item()))
            print('done batch {0}'.format(global_step))

        writer.add_scalar("Loss-train", epoch_loss / len(data_loaded['train']),
                          global_step)

    writer.close()
    return model


if __name__ == "__main__":
    device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')

    net = LeNet5()
    net = net.to(device=device)

    images_path, labels_path = download_train_data(fashion_data_dir)
    data = load_train_data(images_path, labels_path)

    optimizer = optim.SGD(net.parameters(), weight_decay=1e-8, lr=0.01)
    loss_func = torch.nn.CrossEntropyLoss()

    net = train_net(net,
                    data,
                    epochs=20,
                    optimizer=optimizer,
                    loss_func=loss_func,
                    device=device)
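download_train_data and load_train_data are defined elsewhere. Assuming the Fashion-MNIST idx.gz file format, a reader could look like this; the return shape (a dict with a 'train' loader, matching data_loaded['train'] above) is entirely a guess:

import gzip
import numpy as np
import torch
from torch.utils.data import DataLoader, TensorDataset

def load_train_data(images_path, labels_path):
    # hypothetical idx.gz reader: 16-byte image header, 8-byte label header
    with gzip.open(images_path, 'rb') as f:
        images = np.frombuffer(f.read(), dtype=np.uint8, offset=16).copy()
    with gzip.open(labels_path, 'rb') as f:
        labels = np.frombuffer(f.read(), dtype=np.uint8, offset=8).copy()
    x = torch.from_numpy(images).float().view(-1, 1, 28, 28) / 255.0
    y = torch.from_numpy(labels).long()
    return {'train': DataLoader(TensorDataset(x, y), batch_size=64, shuffle=True)}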
Example #12
from utils import prepare_mnist_data

# Net hyper parameters
NUM_EPOCHS = 15
NUM_CLASSES = 10
BATCH_SIZE = 100
LEARNING_RATE = 0.01
DEVICE = 'cuda' if torch.cuda.is_available() else 'cpu'

print('Device is: {}'.format(DEVICE))

DATA_PATH = '/home/igor/Projects/exp-lenet5/data/datasets/MNISTData'
MODEL_STORE_PATH = '/home/igor/Projects/exp-lenet5/data/models'

train_loader, test_loader = prepare_mnist_data(DATA_PATH, BATCH_SIZE)
trainer = LeNet5Trainer(net=LeNet5(NUM_CLASSES).to(DEVICE), device=DEVICE)

trainingStartedAt = datetime.now()

net = trainer.train(train_data=train_loader,
                    learning_rate=LEARNING_RATE,
                    num_epochs=NUM_EPOCHS,
                    print_every=BATCH_SIZE)

print('Training took {}'.format((datetime.now() - trainingStartedAt)))

net_test = NetTest(net, DEVICE)

correct, total = net_test.test(test_data=test_loader)

print('Test Accuracy of the model on the 10000 test images: {}'.format(
    100 * correct / total))
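NetTest is not shown; a minimal sketch consistent with the (correct, total) unpacking above:

import torch

class NetTest:
    # hypothetical tester; only the constructor and test() signatures
    # are taken from the calls above
    def __init__(self, net, device):
        self.net, self.device = net, device

    def test(self, test_data):
        self.net.eval()
        correct, total = 0, 0
        with torch.no_grad():
            for images, labels in test_data:
                outputs = self.net(images.to(self.device))
                correct += (outputs.argmax(1) == labels.to(self.device)).sum().item()
                total += labels.size(0)
        return correct, total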
Example #13
def train(dataset):
    # Hyperparameters
    normalize = [True]
    learning_rates = {
        "mnist": [0.0001, 0.0005, 0.001],
        "fashion-mnist": [0.0001, 0.0005, 0.001],
        "cifar": [0.0001, 0.0005, 0.0008]
    }
    weight_decays = [0, 0.0005]
    num_epochs = 200

    # create a textfile to store the accuracies of each run
    f = open(dataset + "_accuracies.txt", "w")

    # dictionary for early stopping of training based on accuracy
    early_stop = {"mnist": 0.99, "fashion-mnist": 0.90, "cifar": 0.65}

    for norm in normalize:
        for learning_rate in learning_rates[dataset]:
            for decay in weight_decays:
                # read in the correct dataset
                train_loader, test_loader = load_data(dataset, norm)

                # define a new model to train
                leNet = LeNet5(dataset)

                # define the loss and optimizer
                loss_fn = nn.CrossEntropyLoss()
                opt = torch.optim.Adam(params=leNet.parameters(),
                                       lr=learning_rate,
                                       weight_decay=decay)

                # initialize the summaryWriter
                writer = SummaryWriter(
                    f'runs/{dataset}/Norm: {norm}, LR: {learning_rate}, Decay: {decay}'
                )

                print(
                    f'Training with Norm: {norm}, LR: {learning_rate}, Decay: {decay}...'
                )
                # Loop through all the epochs
                for epoch in range(num_epochs):
                    # initialize tqdm for a nice progress bar
                    loop = tqdm(enumerate(train_loader),
                                total=len(train_loader),
                                leave=False)
                    # initialize correct to 0
                    correct, total = 0, 0
                    # Loop through the dataloader
                    for _, (X, y) in loop:
                        # Prediction error
                        pred = leNet(X)  # Forward pass
                        loss = loss_fn(pred, y)  # Loss calculation

                        # Backpropagation
                        opt.zero_grad()  # Zero the gradient
                        loss.backward()  # Calculate updates

                        # Gradient Descent
                        opt.step()  # Apply updates

                        # check if correct and update the total number correct
                        correct += (pred.argmax(1) == y).type(
                            torch.float).sum().item()

                        # update the total size with the size of the batch
                        total += len(y)

                        # Update progress bar
                        loop.set_description(f"Epoch [{epoch+1}/{num_epochs}]")
                        loop.set_postfix(loss=loss.item())

                    # calculate the training accuracy
                    train_acc = correct / total

                    # get the testing accuracy
                    test_acc = test(test_loader, leNet, loss_fn)

                    # update the tensorboard summarywriter
                    writer.add_scalar("Training Accuracy", train_acc,
                                      epoch + 1)
                    writer.add_scalar("Testing Accuracy", test_acc, epoch + 1)

                    # check early stopping
                    if test_acc >= early_stop[dataset]:
                        break

                # get the final testing accuracy and output to text file
                final_test_acc = test(test_loader, leNet, loss_fn)
                print(f'Final Test Accuracy: {final_test_acc}')
                f.write(
                    f'Model Params [Norm: {norm}, LR: {learning_rate}, Decay: {decay}] - Final Accuracy after {epoch + 1} epochs: {final_test_acc}'
                )
                f.write('\n\n')
                # close the tensorboard writer
                writer.close()

    f.close()
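load_data is defined elsewhere; a sketch consistent with load_data(dataset, norm) above, where the normalization statistics and batch size are assumptions:

from torch.utils.data import DataLoader
from torchvision import datasets, transforms

def load_data(dataset, normalize, batch_size=64):
    # hypothetical dataset dispatch matching the three names used above
    if dataset == "cifar":
        ds_cls, norm = datasets.CIFAR10, transforms.Normalize(
            (0.5, 0.5, 0.5), (0.5, 0.5, 0.5))
    elif dataset == "fashion-mnist":
        ds_cls, norm = datasets.FashionMNIST, transforms.Normalize((0.5,), (0.5,))
    else:
        ds_cls, norm = datasets.MNIST, transforms.Normalize((0.5,), (0.5,))
    tf = transforms.Compose([transforms.ToTensor()] + ([norm] if normalize else []))
    train = ds_cls('./data', train=True, download=True, transform=tf)
    test = ds_cls('./data', train=False, download=True, transform=tf)
    return (DataLoader(train, batch_size=batch_size, shuffle=True),
            DataLoader(test, batch_size=batch_size))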
Example #14
    classes = 10
    lr = 0.001

    # get data
    train_data_loader = datahelper.get_mnist_train_data_loader(
        image_size=image_size,
        batch_size=batch_size
    )

    test_data_loader = datahelper.get_mnist_test_data_loader(
        image_size=image_size,
        batch_size=batch_size
    )

    # instantiate model
    model = LeNet5(channels, classes, act='relu')

    # instantiate loss criterion and optimizer
    criterion = nn.CrossEntropyLoss()
    optimizer = optim.Adam(model.parameters(), lr=lr)

    for epoch in range(epochs):
        for (batch, labels) in train_data_loader:

            # empty the gradients of the optimizer
            optimizer.zero_grad()

            # forward pass
            output = model(batch)

            # compute the loss and update the parameters
            loss = criterion(output, labels)
            loss.backward()
            optimizer.step()
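        # Hypothetical end-of-epoch evaluation (not in the original snippet),
        # using the test_data_loader built above.
        model.eval()
        correct, total = 0, 0
        with torch.no_grad():
            for batch, labels in test_data_loader:
                correct += (model(batch).argmax(1) == labels).sum().item()
                total += labels.size(0)
        model.train()
        print(f'epoch {epoch + 1}: test accuracy {correct / total:.4f}')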
Example #15
# Data loader
train_loader = DataLoader(dataset=train_dataset,
                          batch_size=batch_size,
                          shuffle=True)

test_loader = DataLoader(dataset=test_dataset,
                         batch_size=batch_size,
                         shuffle=False)

# take a look at the dataset
x, label = next(iter(train_loader))
print('x: ', x.shape)
print('label: ', label.shape)

model = LeNet5().to(device)

# Loss and optimizer
criterion = nn.CrossEntropyLoss()
optimizer = optim.Adam(model.parameters(), lr=learning_rate)

print(model)

# Train the model
total_step = len(train_loader)
for epoch in range(num_epochs):
    model.train()
    for i, (images, labels) in enumerate(train_loader):
        images = images.to(device)
        labels = labels.to(device)

        # forward pass, loss, and parameter update
        outputs = model(images)
        loss = criterion(outputs, labels)
        optimizer.zero_grad()
        loss.backward()
        optimizer.step()

        if (i + 1) % 100 == 0:
            print('Epoch [{}/{}], Step [{}/{}], Loss: {:.4f}'.format(
                epoch + 1, num_epochs, i + 1, total_step, loss.item()))
Example #16
def train_generic_model(model_name="alexnet",
                        dataset="custom",
                        num_classes=-1,
                        batch_size=8,
                        is_transform=1,
                        num_workers=2,
                        lr_decay=1,
                        l2_reg=0,
                        hdf5_path="dataset-bosch-224x224.hdf5",
                        trainset_dir="./TRAIN_data_224_v8",
                        testset_dir="./TEST_data_224_v8",
                        convert_grey=False):
    CHKPT_PATH = "./checkpoint_{}.PTH".format(model_name)
    print("CUDA:")
    print(torch.cuda.is_available())
    if is_transform:

        trans_ls = []
        if convert_grey:
            trans_ls.append(transforms.Grayscale(num_output_channels=1))
        trans_ls.extend([
            transforms.Resize((224, 224)),
            # transforms.RandomCrop((224, 224)),
            # transforms.Grayscale(num_output_channels=1),
            transforms.ToTensor(),
            transforms.Normalize((0.5, 0.5, 0.5), (0.5, 0.5, 0.5))
        ])
        transform = transforms.Compose(trans_ls)
    else:
        transform = None

    print("DATASET FORMAT: {}".format(dataset))
    print("TRAINSET PATH: {}".format(trainset_dir))
    print("TESTSET PATH: {}".format(testset_dir))
    print("HDF5 PATH: {}".format(hdf5_path))
    if dataset == "custom":
        trainset = torchvision.datasets.ImageFolder(root=trainset_dir,
                                                    transform=transform)
        train_size = len(trainset)
        testset = torchvision.datasets.ImageFolder(root=testset_dir,
                                                   transform=transform)
        test_size = len(testset)
    elif dataset == "cifar":
        trainset = torchvision.datasets.CIFAR10(root="CIFAR_TRAIN_data",
                                                train=True,
                                                download=True,
                                                transform=transform)
        train_size = len(trainset)
        testset = torchvision.datasets.CIFAR10(root="CIFAR_TEST_data",
                                               train=False,
                                               download=True,
                                               transform=transform)
        test_size = len(testset)
    elif dataset == "hdf5":
        if num_workers == 1:
            trainset = Hdf5Dataset(hdf5_path,
                                   transform=transform,
                                   is_test=False)
        else:
            trainset = Hdf5DatasetMPI(hdf5_path,
                                      transform=transform,
                                      is_test=False)
        train_size = len(trainset)
        if num_workers == 1:
            testset = Hdf5Dataset(hdf5_path, transform=transform, is_test=True)
        else:
            testset = Hdf5DatasetMPI(hdf5_path,
                                     transform=transform,
                                     is_test=True)
        test_size = len(testset)

    train_loader = torch.utils.data.DataLoader(trainset,
                                               batch_size=batch_size,
                                               shuffle=True,
                                               num_workers=num_workers)

    test_loader = torch.utils.data.DataLoader(testset,
                                              batch_size=batch_size,
                                              shuffle=True,
                                              num_workers=num_workers)
    if model_name == "alexnet":
        net = AlexNet(num_classes=num_classes)
    elif model_name == "lenet5":
        net = LeNet5(num_classes=num_classes)
    elif model_name == "stn-alexnet":
        net = STNAlexNet(num_classes=num_classes)
    elif model_name == "stn-lenet5":
        net = LeNet5STN(num_classes=num_classes)
    elif model_name == "capsnet":
        net = CapsuleNet(num_classes=num_classes)
    elif model_name == "convneta":
        net = ConvNetA(num_classes=num_classes)
    elif model_name == "convnetb":
        net = ConvNetB(num_classes=num_classes)
    elif model_name == "convnetc":
        net = ConvNetC(num_classes=num_classes)
    elif model_name == "convnetd":
        net = ConvNetD(num_classes=num_classes)
    elif model_name == "convnete":
        net = ConvNetE(num_classes=num_classes)
    elif model_name == "convnetf":
        net = ConvNetF(num_classes=num_classes)
    elif model_name == "convnetg":
        net = ConvNetG(num_classes=num_classes)
    elif model_name == "convneth":
        net = ConvNetH(num_classes=num_classes)
    elif model_name == "convneti":
        net = ConvNetI(num_classes=num_classes)
    elif model_name == "convnetj":
        net = ConvNetJ(num_classes=num_classes)
    elif model_name == "convnetk":
        net = ConvNetK(num_classes=num_classes)
    elif model_name == "convnetl":
        net = ConvNetL(num_classes=num_classes)
    elif model_name == "convnetm":
        net = ConvNetM(num_classes=num_classes)
    elif model_name == "convnetn":
        net = ConvNetN(num_classes=num_classes)
    elif model_name == "resnet18":
        net = models.resnet18(pretrained=False, num_classes=num_classes)

    print(net)

    if torch.cuda.is_available():
        net = net.cuda()

    if model_name == "capsnet":
        criterion = CapsuleLoss()
    else:
        criterion = nn.CrossEntropyLoss()

    optimizer = optim.SGD(net.parameters(),
                          lr=LEARNING_RATE,
                          momentum=0.9,
                          weight_decay=l2_reg)

    if lr_decay:
        scheduler = ReduceLROnPlateau(optimizer, 'min')

    best_acc = 0
    from_epoch = 0

    if os.path.exists(CHKPT_PATH):
        print("Checkpoint Found: {}".format(CHKPT_PATH))
        state = torch.load(CHKPT_PATH)
        net.load_state_dict(state['state_dict'])
        optimizer.load_state_dict(state['optimizer'])
        best_acc = state['best_accuracy']
        from_epoch = state['epoch']

    for epoch in range(from_epoch, NUM_EPOCHS):
        #print("Epoch: {}/{}".format(epoch + 1, NUM_EPOCHS))
        epoch_loss = 0
        correct = 0
        for i, data in enumerate(train_loader, 0):
            #print("Train \t Epoch: {}/{} \t Batch: {}/{}".format(epoch + 1,
            #                                            NUM_EPOCHS,
            #                                            i + 1,
            #                                            ceil(train_size / BATCH_SIZE)))
            inputs, labels = data
            inputs, labels = inputs.float(), labels.long()

            if model_name == "capsnet":
                inputs = augmentation(inputs)
                ground_truth = torch.eye(num_classes).index_select(
                    dim=0, index=labels)

            if torch.cuda.is_available():
                inputs = inputs.cuda()
                labels = labels.cuda()

            optimizer.zero_grad()

            if model_name == "capsnet":
                classes, reconstructions = net(inputs, ground_truth)
                loss = criterion(inputs, ground_truth, classes,
                                 reconstructions)
            else:
                outputs = net(inputs)
                loss = criterion(outputs, labels)

            loss.backward()
            optimizer.step()
            epoch_loss += loss.item()
            if model_name != "capsnet":
                log_outputs = F.softmax(outputs, dim=1)
            else:
                log_outputs = classes
            pred = log_outputs.detach().max(1, keepdim=True)[1]
            correct += pred.eq(labels.view_as(pred)).sum().item()

        print(
            "Epoch: {} \t Training Loss: {:.4f} \t Training Accuracy: {:.2f} \t {}/{}"
            .format(epoch + 1, epoch_loss / train_size,
                    100 * correct / train_size, correct, train_size))

        correct = 0
        test_loss = 0
        for i, data in enumerate(test_loader, 0):
            # print("Test \t Epoch: {}/{} \t Batch: {}/{}".format(epoch + 1,
            #                                             NUM_EPOCHS,
            #                                             i + 1,
            #                                             ceil(test_size / BATCH_SIZE)))
            inputs, labels = data
            inputs, labels = inputs.float(), labels.long()

            if model_name == "capsnet":
                inputs = augmentation(inputs)
                ground_truth = torch.eye(num_classes).index_select(
                    dim=0, index=labels)

            if torch.cuda.is_available():
                inputs = inputs.cuda()
                labels = labels.cuda()

            if model_name == "capsnet":
                classes, reconstructions = net(inputs)
                loss = criterion(inputs, ground_truth, classes,
                                 reconstructions)
            else:
                outputs = net(inputs)
                loss = criterion(outputs, labels)

            test_loss += loss.item()

            if model_name != "capsnet":
                log_outputs = F.softmax(outputs, dim=1)
            else:
                log_outputs = classes

            pred = log_outputs.detach().max(1, keepdim=True)[1]
            correct += pred.eq(labels.view_as(pred)).sum().item()
        print(
            "Epoch: {} \t Testing Loss: {:.4f} \t Testing Accuracy: {:.2f} \t {}/{}"
            .format(epoch + 1, test_loss / test_size,
                    100 * correct / test_size, correct, test_size))
        if correct >= best_acc:
            if not os.path.exists("./models"):
                os.mkdir("./models")
            torch.save(
                net.state_dict(),
                "./models/model-{}-{}-{}-{}-val-acc-{:.2f}-train-{}-test-{}-epoch-{}.pb"
                .format(model_name, dataset, hdf5_path, str(datetime.now()),
                        100 * correct / test_size,
                        trainset_dir.replace(" ", "_").replace("/", "_"),
                        testset_dir.replace(" ", "_").replace("/",
                                                              "_"), epoch + 1))
        best_acc = max(best_acc, correct)

        # save checkpoint path
        state = {
            'epoch': epoch,
            'state_dict': net.state_dict(),
            'optimizer': optimizer.state_dict(),
            'best_accuracy': best_acc
        }
        torch.save(state, CHKPT_PATH)

        if lr_decay:
            # Note that step should be called after validate()
            scheduler.step(test_loss)

    print('Finished Training')

    print("")
    print("")