Example #1
def trainForAsigment(n_epochs, n_hidden_layers, mode, mnist_data, mnist_test):
    optimizer_dict = getOptimizer(mode)
    optimizerFunc = optimizer_dict["optimizerFunc"]
    has_momentum = optimizer_dict["has_momentum"]
    momentum = optimizer_dict["momentum"]
    learning_rate = optimizer_dict["learning_rate"]
    update_lr = optimizer_dict["update_lr"]
    lossFunc = nn.MSELoss()
    hf.myprint("Using MSE as loss function")
    dataset_full = torch.utils.data.ConcatDataset([mnist_data, mnist_test])
    new_train, new_test = torch.utils.data.random_split(
        dataset_full, [50000, 20000])
    return trainAutoencoder(n_hidden_layers,
                            new_train,
                            new_test,
                            saved_encoder=None,
                            n_epochs=n_epochs,
                            optimizerFunc=optimizerFunc,
                            lossFunc=lossFunc,
                            learning_rate=learning_rate,
                            update_lr=update_lr,
                            has_momentum=has_momentum,
                            momentum=momentum,
                            batch_size=1000,
                            dropout_rate=0.1,
                            log=True,
                            log_interval=10)
Example #2
def train_cnn_for_assigment(n_epochs, mnist_data, mnist_test):
    optimizerFunc = torch.optim.Adam
    has_momentum = False
    momentum = None
    learning_rate = 0.001
    update_lr = False
    lossFunc = nn.MSELoss()
    hf.myprint("Using MSE as loss function")
    dataset_full = torch.utils.data.ConcatDataset([mnist_data, mnist_test])
    new_train, new_test = torch.utils.data.random_split(
        dataset_full, [50000, 20000])
    return train_cnn(new_train,
                     new_test,
                     saved_encoder=None,
                     n_epochs=n_epochs,
                     optimizerFunc=optimizerFunc,
                     lossFunc=lossFunc,
                     learning_rate=learning_rate,
                     update_lr=update_lr,
                     has_momentum=has_momentum,
                     momentum=momentum,
                     batch_size=1000,
                     dropout_rate=0.1,
                     log=True,
                     log_interval=10)
Example #3
def get_learning_rate_update(epoch, learning_rate):
    if (epoch % 10 == 0 and epoch != 0):
        new_learning_rate = learning_rate / 1.5
        hf.myprint(
            f"Lowering learning rate from {learning_rate} to {new_learning_rate}"
        )
        return new_learning_rate
    else:
        return learning_rate
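As used in Example #14, this schedule divides the learning rate by 1.5 every 10 epochs (skipping epoch 0). A minimal sketch of the resulting values, assuming the 0.5 starting rate from the SGD configuration in Example #10 and leaving out the hf logging:

lr = 0.5
schedule = []
for epoch in range(40):
    if epoch % 10 == 0 and epoch != 0:
        lr /= 1.5  # same rule as get_learning_rate_update
    schedule.append(lr)
print(schedule[0], schedule[10], schedule[20], schedule[30])
# -> 0.5, ~0.333, ~0.222, ~0.148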
Example #4
def evaluateSavedNetwork(filename):
    train_loader = torch.utils.data.DataLoader(mnist_data,
                                               batch_size=glval.batch_size,
                                               shuffle=True)
    test_loader = torch.utils.data.DataLoader(mnist_test,
                                              batch_size=glval.batch_size)
    saved_encoder = torch.load(filename)
    test_results = ac.testEncoder(saved_encoder, test_loader, train_loader,
                                  nn.MSELoss(), ac.Flatten())
    hf.myprint(
        f"Saved Network Train Loss: {test_results[0]} \n\tSaved Network Test Loss: {test_results[1]}"
    )
    ac.compareEncoder(saved_encoder, mnist_test, (0, 20), save=True)
Example #5
def train_for_assigment(ac_name, a_encoder, n_epochs, mnist_data, mnist_test):
    hf.myprint(f"Starting assigment for {ac_name}...")
    optimizerFunc = torch.optim.Adam
    learning_rate = 0.001
    optimizer = optimizerFunc(a_encoder.parameters(), lr=learning_rate)
    batch_size = 250
    # lossFunc = nn.MSELoss()
    lossFunc = nn.BCELoss()
    dataset_full = torch.utils.data.ConcatDataset([mnist_data,mnist_test])
    mnist_data, mnist_test = torch.utils.data.random_split(dataset_full, [50000,20000])
    train_loader = torch.utils.data.DataLoader(mnist_data, batch_size=batch_size, shuffle=True)
    test_loader  = torch.utils.data.DataLoader(mnist_test, batch_size=batch_size)
    train_losses_av = []
    train_error_arr = []
    test_error_arr = [] 
    test_results = testEncoder(a_encoder, test_loader, train_loader, lossFunc)
    hf.myprint(f"Training {ac_name} with {n_epochs} epochs. Using MSE as loss function and Adam as optimizer with learning rate: {learning_rate}")
    hf.myprint(f"Starting Train Loss: {test_results[0]} \n\tStarting Test  Loss: {test_results[1]}")
    for epoch in range(n_epochs):
        train_epoch(epoch, a_encoder, optimizer, lossFunc, train_loader, train_losses_av)
        test_results = testEncoder(a_encoder, test_loader, train_loader, lossFunc)
        train_error_arr.append(test_results[0])
        test_error_arr.append(test_results[1])
        hf.myprint(f"Current av Train Loss: {test_results[0]} \n\tCurrent av Test  Loss: {test_results[1]}")
    return_dict = {
        "autoencoder": a_encoder,
        "train_losses_av": train_losses_av,
        "train_error_arr": train_error_arr,
        "test_error_arr": test_error_arr
    }
    return return_dict
Example #6
def trainEpoch(epoch, a_encoder, optimizer, lossFunc, train_loader,
               train_losses_av):
    a_encoder.train()
    train_loss_av = 0
    for batch_n, (data, _) in enumerate(train_loader):
        optimizer.zero_grad()
        output = a_encoder(data)
        loss = lossFunc(output, data)
        train_loss_av += loss.detach().item()
        loss.backward()
        optimizer.step()
        if (batch_n % 5 == 0):
            hf.myprint(
                'Train Epoch: {} [{}/{} ({:.0f}%)]\tLoss: {:.6f}'.format(
                    epoch + 1, batch_n * len(data), len(train_loader.dataset),
                    100. * batch_n / len(train_loader), loss.item()))
    train_loss_av /= len(train_loader)
    hf.myprint(f"Train loss Average: {train_loss_av}")
    train_losses_av.append(train_loss_av)
Example #7
def train_epoch(epoch, autoenc, optimizer, lossFunc, flatten, train_loader,
                log, log_interval, train_losses_drop):
    autoenc.train()
    train_loss_drop = 0
    for batch_n, (data, _) in enumerate(train_loader):
        optimizer.zero_grad()
        output = autoenc(data)
        loss = lossFunc(output, flatten(data))
        train_loss_drop += loss.detach().item()
        loss.backward()
        optimizer.step()
        if (log and batch_n % log_interval == 0):
            hf.myprint(
                'Train Epoch: {} [{}/{} ({:.0f}%)]\tLoss: {:.6f}'.format(
                    epoch + 1, batch_n * len(data), len(train_loader.dataset),
                    100. * batch_n / len(train_loader), loss.item()))
    train_loss_drop /= len(train_loader)
    hf.myprint(f"Train loss Drop: {train_loss_drop}")
    train_losses_drop.append(train_loss_drop)
Example #8
def train_epoch(epoch, autoenc, optimizer, lossFunc, train_loader, train_losses_av):
    autoenc.train()
    train_loss_av = 0
    log_interval = 10
    for batch_n, (data, _) in enumerate(train_loader):
        data = data.to(global_device)
        optimizer.zero_grad()
        # get_loss (defined elsewhere in this project) computes the loss from
        # the model and batch, so the unused autoenc(data) call was removed.
        loss = get_loss(autoenc, data, lossFunc)
        train_loss_av += loss.detach().item()
        loss.backward()
        optimizer.step()
        if (batch_n % log_interval == 0):
            hf.myprint('Train Epoch: {} [{}/{} ({:.0f}%)]\tLoss: {:.6f}'.format(
                epoch+1, batch_n * len(data), len(train_loader.dataset),
                100. * batch_n / len(train_loader), loss.item()))
    train_loss_av /= len(train_loader)
    hf.myprint(f"Train loss average: {train_loss_av}")
    train_losses_av.append(train_loss_av)
Example #9
def plotComparison():
    #print(type(hf.loadTrainingLog(64, 0)))
    hiddenlis = [64, 128, 256, 512]
    for m in range(3):
        dic_lis = [hf.loadTrainingLog(h, m) for h in hiddenlis]
        _, ax = plt.subplots(figsize=(10, 5))
        ax.set_xlim(-0.2, 40 + 0.5)
        ax.set_ylabel('Error')
        ax.set_yscale("log")
        ax.set_xlabel('Epoch')
        optimFunc = ["SGD", "Adam", "RMSprop"]
        plt.title(label=f"Testing Loss with {optimFunc[m]}")
        x_values = [x for x in range(40 + 1)]
        for i, dic in enumerate(dic_lis):
            ax.plot(np.array(x_values),
                    np.array(dic["test_error_arr"]),
                    label=f"{hiddenlis[i]} hidden neurons")
        ax.legend(loc="upper right")
        filename = f'graph_b_m{m}.png'
        plt.savefig(filename, dpi=200)
        hf.myprint("\tSaved " + filename)
        plt.clf()

    for h in hiddenlis:
        dic_lis = [hf.loadTrainingLog(h, m) for m in range(3)]
        _, ax = plt.subplots(figsize=(10, 5))
        ax.set_xlim(-0.2, 40 + 0.5)
        ax.set_ylabel('Error')
        ax.set_yscale("log")
        ax.set_xlabel('Epoch')
        optimFunc = ["SGD", "Adam", "RMSprop"]
        plt.title(label=f"Testing Loss with {h} neurons on the hidden layer")
        x_values = [x for x in range(40 + 1)]
        for m, dic in enumerate(dic_lis):
            ax.plot(np.array(x_values),
                    np.array(dic["test_error_arr"]),
                    label=f"{optimFunc[m]}")
        ax.legend(loc="upper right")
        filename = f'graph_c_h{h}.png'
        plt.savefig(filename, dpi=200)
        hf.myprint("\tSaved " + filename)
        plt.clf()
Example #10
def getOptimizer(mode):
    if mode == 0:
        hf.myprint("Using SGD as optimizer")
        dic = {
            "optimizerFunc": torch.optim.SGD,
            "has_momentum": True,
            "momentum": 0.5,
            "learning_rate": 0.5,
            "update_lr": True
        }
        return dic
    elif mode == 1:
        hf.myprint("Using Adam as optimizer")
        dic = {
            "optimizerFunc": torch.optim.Adam,
            "has_momentum": False,
            "momentum": None,
            "learning_rate": 0.001,
            "update_lr": True
        }
        return dic
    elif mode == 2:
        hf.myprint("Using RMSprop as optimizer")
        dic = {
            "optimizerFunc": torch.optim.RMSprop,
            "has_momentum": True,
            "momentum": 0.1,
            "learning_rate": 0.001,
            "update_lr": False
        }
        return dic
    else:
        raise ValueError("Incorrect Mode in getOptimizer()")
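The dictionary returned here is unpacked in Example #1 and forwarded to trainAutoencoder (Example #14), which builds the optimizer from it. A minimal sketch of that handoff; the Linear model is a hypothetical stand-in for the autoencoder:

opts = getOptimizer(mode=0)  # SGD settings
model = torch.nn.Linear(784, 64)  # stand-in model, illustration only
if opts["has_momentum"]:
    optimizer = opts["optimizerFunc"](model.parameters(),
                                      lr=opts["learning_rate"],
                                      momentum=opts["momentum"])
else:
    optimizer = opts["optimizerFunc"](model.parameters(),
                                      lr=opts["learning_rate"])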
Example #11
def testEncoder(an_encoder, test_loader, train_loader):
    hf.myprint("Testing encoder...")
    an_encoder.eval()
    loss_func1 = nn.MSELoss()
    loss_func2 = nn.BCELoss()
    test_loss1 = 0
    test_loss2 = 0
    train_loss1 = 0
    train_loss2 = 0
    with torch.no_grad():
        for data, _ in test_loader:
            output = an_encoder(data)
            test_loss1 += loss_func1(output, data).item()
            test_loss2 += loss_func2(output, data).item()
        for data, _ in train_loader:
            output = an_encoder(data)
            train_loss1 += loss_func1(output, data).item()
            train_loss2 += loss_func2(output, data).item()
    train_loss1 /= len(train_loader)
    train_loss2 /= len(train_loader)
    test_loss1 /= len(test_loader)
    test_loss2 /= len(test_loader)
    return (train_loss1, train_loss2, test_loss1, test_loss2)
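This variant returns a 4-tuple that Example #13 indexes positionally; unpacking it by name makes the order explicit (a sketch, the variable names are illustrative):

train_mse, train_bce, test_mse, test_bce = testEncoder(
    a_encoder, test_loader, train_loader)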
Example #12
def compareEncoder(autoenc,
                   mnist_test,
                   number_range,
                   save=False,
                   basename='',
                   tight=False):
    flatten = Flatten()
    if isinstance(number_range, tuple):
        rang = range(number_range[0], number_range[1])
    else:
        rang = number_range
    for i in rang:
        _, axes = plt.subplots(figsize=(10, 6), ncols=2, nrows=1)
        axes[0].imshow(mnist_test[i][0][0], cmap='gray')
        img = autoenc(flatten(
            mnist_test[i][0]).unsqueeze(0)).detach().numpy().reshape([28, 28])
        axes[1].imshow(
            img,
            cmap='gray',
        )
        if save:
            filename = basename + f'_comp{i}.png'
            if tight:
                for a in axes:
                    a.axis('off')  # hide ticks on both panels
                plt.savefig(filename,
                            dpi=150,
                            bbox_inches='tight',
                            transparent=True,
                            pad_inches=0)
            else:
                plt.savefig(filename, dpi=200)

            hf.myprint("\tSaved " + filename)
            plt.clf()
        else:
            plt.show()
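Example #4 shows a typical invocation: after loading a saved network, it renders side-by-side originals and reconstructions for the first 20 test digits and saves them as PNGs:

ac.compareEncoder(saved_encoder, mnist_test, (0, 20), save=True)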
Example #13
def train_cnn(n_epochs, mnist_data, mnist_test):
    optimizerFunc = torch.optim.Adam
    learning_rate = 0.001
    lossFunc = nn.BCELoss()
    batch_size = 1000
    a_encoder = cnn_aut()
    hf.myprint(f"Training Convolutionl autoencoder with {n_epochs} epochs")
    hf.myprint(
        f"Using Binary Cross Entropy as loss function. Adam as optimizer with learning rate: {learning_rate}"
    )
    dataset_full = torch.utils.data.ConcatDataset([mnist_data, mnist_test])
    new_train, new_test = torch.utils.data.random_split(
        dataset_full, [50000, 20000])

    train_loader = torch.utils.data.DataLoader(new_train,
                                               batch_size=batch_size,
                                               shuffle=True)
    test_loader = torch.utils.data.DataLoader(new_test,
                                              batch_size=batch_size)
    optimizer = optimizerFunc(a_encoder.parameters(), lr=learning_rate)
    train_losses_av = []
    train_error_arr1 = []
    train_error_arr2 = []
    test_error_arr1 = []
    test_error_arr2 = []
    test_results = testEncoder(a_encoder, test_loader, train_loader)
    train_error_arr1.append(test_results[0])
    train_error_arr2.append(test_results[1])
    test_error_arr1.append(test_results[2])
    test_error_arr2.append(test_results[3])
    hf.myprint(
        f"Starting Train MSELoss: {test_results[0]} \n\tStarting Test MSELoss: {test_results[2]}"
    )
    hf.myprint(
        f"Starting Train BCELoss: {test_results[1]} \n\tStarting Test BCELoss: {test_results[3]}"
    )

    for epoch in range(n_epochs):
        # Reuse the optimizer built above; rebuilding it every epoch would
        # discard Adam's running moment estimates.
        trainEpoch(epoch, a_encoder, optimizer, lossFunc, train_loader,
                   train_losses_av)
        test_results = testEncoder(a_encoder, test_loader, train_loader)
        train_error_arr1.append(test_results[0])
        train_error_arr2.append(test_results[1])
        test_error_arr1.append(test_results[2])
        test_error_arr2.append(test_results[3])
        hf.myprint(
            f"Current av Train MSELoss: {test_results[0]} \n\tCurrent av Test MSELoss: {test_results[2]}"
        )
        hf.myprint(
            f"Current av Train BCELoss: {test_results[1]} \n\tCurrent av Test BCELoss: {test_results[3]}"
        )

    return_dict = {
        "autoencoder": a_encoder,
        "train_losses_av": train_losses_av,
        "train_error_arr1": train_error_arr1,
        "train_error_arr2": train_error_arr2,
        "test_error_arr1": test_error_arr1,
        "test_error_arr2": test_error_arr2
    }
    return return_dict
Example #14
def trainAutoencoder(amount_hidden,
                     mnist_data,
                     mnist_test,
                     saved_encoder=None,
                     n_epochs=glval.n_epochs,
                     optimizerFunc=glval.optimizerFunc,
                     lossFunc=glval.lossFunc,
                     learning_rate=glval.learning_rate,
                     update_lr=False,
                     has_momentum=glval.has_momentum,
                     momentum=glval.momentum,
                     batch_size=glval.batch_size,
                     dropout_rate=glval.dropout_rate,
                     log=True,
                     log_interval=glval.log_interval):
    hf.myprint(
        f"Training autoencoder with {amount_hidden} hidden units for {n_epochs} epochs"
    )
    mom = 0 if not has_momentum else momentum
    hf.myprint(
        f"learning rate: {learning_rate} - dropout rate: {dropout_rate} - momentum: {mom}"
    )
    hf.myprint(
        f"Testing dataset size:{len(mnist_test)} - Training dataset size:{len(mnist_data)}"
    )
    train_loader = torch.utils.data.DataLoader(mnist_data,
                                               batch_size=batch_size,
                                               shuffle=True)
    test_loader = torch.utils.data.DataLoader(mnist_test,
                                              batch_size=batch_size)
    if saved_encoder is None:
        a_encoder = autoencoder(size_hidden=amount_hidden,
                                dropout=dropout_rate)
    else:
        hf.myprint("Loaded saved network")
        a_encoder = saved_encoder
    if has_momentum:
        optimizer = optimizerFunc(a_encoder.parameters(),
                                  lr=learning_rate,
                                  momentum=momentum)
    else:
        optimizer = optimizerFunc(a_encoder.parameters(), lr=learning_rate)
    flatten = Flatten()
    train_losses_drop = []
    train_error_arr = []
    test_error_arr = []
    test_results = testEncoder(a_encoder, test_loader, train_loader, lossFunc,
                               flatten)
    train_error_arr.append(test_results[0])
    test_error_arr.append(test_results[1])
    if log:
        hf.myprint(
            f"Starting Train Loss: {test_results[0]} \n\tStarting Test  Loss: {test_results[1]}"
        )
    for epoch in range(n_epochs):
        if update_lr:
            learning_rate = get_learning_rate_update(epoch, learning_rate)
            if has_momentum:
                optimizer = optimizerFunc(a_encoder.parameters(),
                                          lr=learning_rate,
                                          momentum=momentum)
            else:
                optimizer = optimizerFunc(a_encoder.parameters(),
                                          lr=learning_rate)
        train_epoch(epoch, a_encoder, optimizer, lossFunc, flatten,
                    train_loader, log, log_interval, train_losses_drop)
        test_results = testEncoder(a_encoder, test_loader, train_loader,
                                   lossFunc, flatten)
        train_error_arr.append(test_results[0])
        test_error_arr.append(test_results[1])
        if log:
            hf.myprint(
                f"Current av Train Loss: {test_results[0]} \n\tCurrent av Test  Loss: {test_results[1]}"
            )
    return_dict = {
        "autoencoder": a_encoder,
        "train_losses_drop": train_losses_drop,
        "train_error_arr": train_error_arr,
        "test_error_arr": test_error_arr
    }
    return return_dict
Example #15
import torch
import torchvision
import torchvision.datasets as datasets
from torch import nn
import autoencoder as ac
import global_var as glval
import helper_functions as hf  # assumed import path for the `hf` helpers used below
import warnings
import seaborn as sns
import json

from final_autoencoder import train_cnn

hf.lineprint("Loading datasets and parameters...")
warnings.filterwarnings("ignore", category=UserWarning)
sns.set_style('darkgrid')
sns.set_context('talk')
sns.set(font_scale=0.7)
hf.myprint("Completed")
torch.manual_seed(12345678)

try:
    hf.myprint(f"Running on {torch.cuda.get_device_name(0)}")
except Exception:
    hf.myprint("No GPU available")

mnist_mean = 0.1307
mnist_standard_deviation = 0.3081

mnist_data = datasets.MNIST('./data',
                            train=True,
                            download=True,
                            transform=torchvision.transforms.Compose([
                                torchvision.transforms.ToTensor(),
                                # snippet was truncated here; Normalize is
                                # completed from the constants defined above
                                torchvision.transforms.Normalize(
                                    (mnist_mean,),
                                    (mnist_standard_deviation,))
                            ]))
Example #16
def runConvolutionalAutoencoder(number_of_epochs,
                                plot_graph=False,
                                save_network=False,
                                save_training_log=False,
                                compare=True,
                                log=True,
                                show=True):
    hf.lineprint("Processing autoencoder...")
    trained_dict = ac.train_cnn_for_assigment(number_of_epochs,
                                              mnist_data=mnist_data,
                                              mnist_test=mnist_test)
    basename = f"cnn_epch{number_of_epochs}"
    trained_encoder = trained_dict["autoencoder"]
    if compare:
        ac.compareEncoder(trained_encoder,
                          mnist_test, (0, 50),
                          save=True,
                          basename=basename)
    if save_network:
        torch.save(trained_encoder, basename + ".savednn")
    if save_training_log:
        with open(basename + "_traintest.txt", 'w') as f:
            writedict = dict(trained_dict)  # copy so the caller's dict keeps the model
            del writedict["autoencoder"]
            f.write(json.dumps(writedict))
    if plot_graph:
        _, ax = plt.subplots(figsize=(10, 5))
        ax.set_xlim(-0.2, number_of_epochs + 0.5)
        ax.set_ylabel('Error')
        ax.set_yscale("log")
        ax.set_xlabel('Epoch')
        x_values = [x for x in range(number_of_epochs + 1)]
        ax.plot(np.array(x_values),
                np.array(trained_dict["test_error_arr"]),
                label="Test Error")
        ax.plot(np.array(x_values),
                np.array(trained_dict["train_error_arr"]),
                label="Training Error")
        ax.legend(loc="upper right")
        filename = basename + "_graph.png"
        plt.savefig(filename, dpi=200)
        hf.myprint("\tSaved " + filename)
        plt.clf()

        _, ax = plt.subplots(figsize=(10, 5))
        ax.set_xlim(5, number_of_epochs)
        ax.set_ylabel('Error')
        ax.set_yscale("log")
        ax.set_xlabel('Epoch')
        x_values = [x for x in range(5, number_of_epochs + 1)]
        ax.plot(np.array(x_values),
                np.array(trained_dict["test_error_arr"][5:]),
                label="Test Error")
        ax.plot(np.array(x_values),
                np.array(trained_dict["train_error_arr"][5:]),
                label="Training Error")
        ax.legend(loc="upper right")
        filename = basename + "_graph_zoomed.png"
        plt.savefig(filename, dpi=200)
        hf.myprint("\tSaved " + filename)
        plt.clf()