Example #1
import numpy as np

import mnist  # MNIST loader exposing load() -> (x_train, t_train, x_test, t_test)


def load_mnist():
    # Get MNIST dataset
    x_train, t_train, x_test, t_test = mnist.load()
    X_train = []
    X_test = []

    # Reshape training and testing images from 784 to 28 by 28
    # Pad images by 2 to get 32 x 32 images. This is done to further center the digits in the images.
    # It could be done within the first Conv layer, but it's faster to do it only once during data prep.
    for img in x_train:
        img = img.reshape(28, 28)
        img = np.pad(img, 2, mode='constant')
        img = img.reshape(32, 32, 1)

        X_train.append(img)

    for img in x_test:
        img = img.reshape(28, 28)
        img = np.pad(img, 2, mode='constant')
        img = img.reshape(32, 32, 1)

        X_test.append(img)

    # Turn datasets into NumPy arrays
    X_train = np.array(X_train, dtype=np.float64)
    X_test = np.array(X_test, dtype=np.float64)

    # Normalize datasets to zero mean and unit standard deviation
    X_train -= np.mean(X_train)
    X_train /= np.std(X_train)

    X_test -= np.mean(X_test)
    X_test /= np.std(X_test)

    return X_train, X_test, t_train, t_test
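
A minimal usage sketch for the loader above (shapes assume the standard 60000/10000 MNIST split):

X_train, X_test, t_train, t_test = load_mnist()
print(X_train.shape)   # expected: (60000, 32, 32, 1)
print(X_test.shape)    # expected: (10000, 32, 32, 1)
print(X_train.mean())  # approximately 0 after normalization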
Example #2
import numpy as np

import mnist  # MNIST loader exposing load() -> (x_train, t_train, x_test, t_test)
# FC_2Layer and ops are defined elsewhere in the same repository.


def test_fc():

    # Load data
    x_train, t_train, x_test, t_test = mnist.load()

    # Normalize data
    X_train = np.array(x_train, dtype=np.float64)
    X_test = np.array(x_test, dtype=np.float64)

    X_train -= np.mean(X_train)
    X_train /= np.std(X_train)

    X_test -= np.mean(X_test)
    X_test /= np.std(X_test)

    # Initialize network
    network = FC_2Layer()

    # Train Network
    epochs = 20
    for e in range(epochs):
        total_loss = 0
        correct = 0
        for i, img in enumerate(X_train):

            ### Forward Pass ###
            y_hat = network.forward(img)

            ### Cross Entropy Loss ###
            y = t_train[i]
            loss, y = ops.cross_entropy(y, y_hat)
            total_loss += loss

            # Calculate Accuracy
            if np.argmax(y_hat) == np.argmax(y):
                correct += 1

            ### Backward Pass ###
            network.backward(y)

            if i % 5000 == 0:
                print(f"Epoch {e}, Sample {i}, Loss: {loss:.6f}, "
                      f"ACC: {correct / (i + 1):.4f}")
                #print("time elapsed: " + str(time.time() - start_time))

        print(f"Epoch {e} Loss: {total_loss / len(X_train):.6f} "
              f"Acc: {correct / len(X_train):.4f}")
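
ops.cross_entropy is not shown in this example. A plausible stand-in, assuming integer class labels and a softmax output vector (both assumptions; the real ops module may differ):

import numpy as np

def cross_entropy(label, y_hat, num_classes=10, eps=1e-12):
    # Hypothetical stand-in for ops.cross_entropy: one-hot encode the
    # integer label, then return -sum(y * log(y_hat)) and the one-hot y.
    y = np.zeros(num_classes)
    y[int(label)] = 1.0
    loss = -np.sum(y * np.log(y_hat + eps))
    return loss, y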
Example #3
def test(self):
    itime = time()
    train_cumloss = train_cumhits = 0
    epoch = next(mnist.load(batch_size=self.batch_size))
    nof_batches = 60000 // self.batch_size  # MNIST training set size
    for batch in epoch:
        cumloss, cumhits = self.batch_eval(batch,
                                           grads=False,
                                           hitrate=True)
        train_cumloss += cumloss
        train_cumhits += cumhits
    train_avgloss = train_cumloss / nof_batches
    train_hitrate = train_cumhits / nof_batches
    test_time = time() - itime
    print(
        f"[E] {train_avgloss=:.6f} {train_hitrate=:.2f}% {test_time=:.2f}s"
    )
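
batch_eval is defined elsewhere in the repository. A standalone sketch of the same averaging pattern, with a generic eval_fn standing in for it (all names here are hypothetical):

from time import time

def average_over_batches(batches, eval_fn):
    # eval_fn(batch) -> (loss, hits); averages both over all batches
    # and reports the elapsed wall-clock time, mirroring test() above.
    itime = time()
    cumloss = cumhits = n = 0
    for batch in batches:
        loss, hits = eval_fn(batch)
        cumloss += loss
        cumhits += hits
        n += 1
    return cumloss / n, cumhits / n, time() - itime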
Example #4
from typing import List

# Subject, NeuralNetwork, mnist, BATCH_SIZE, randn and time are defined or
# imported elsewhere in the source module.


class GaNet(Subject, NeuralNetwork):

    dlayers: List

    __mnist_db = mnist.load(batch_size=BATCH_SIZE)
    __batch_gen = (batch for epoch in __mnist_db for batch in epoch)

    _genome: List[float]
    _fitness: float

    def __init__(self, params):
        """
        Precondition: call set_layers_description() before any instantiation
        so that dlayers is initialized.
        """
        super().__init__(GaNet.dlayers, BATCH_SIZE, params=params)
        self._genome = params
        self._fitness = None

    @classmethod
    def create_random(cls):
        return GaNet(cls.get_random_params())

    @property
    def genome(self) -> List[float]:
        return self._genome

    @property
    def fitness(self) -> float:
        if self._fitness is None:
            self._fitness = -self.batch_cost()
        return self._fitness

    def batch_cost(self):
        random_batch = next(GaNet.__batch_gen)
        error = self.batch_eval(random_batch, grads=False)
        return error

    def test(self):
        itime = time()
        train_cumloss = train_cumhits = 0
        epoch = next(mnist.load(batch_size=self.batch_size))
        nof_batches = 60000 // self.batch_size  # MNIST training set size
        for batch in epoch:
            cumloss, cumhits = self.batch_eval(batch,
                                               grads=False,
                                               hitrate=True)
            train_cumloss += cumloss
            train_cumhits += cumhits
        train_avgloss = train_cumloss / nof_batches
        train_hitrate = train_cumhits / nof_batches
        test_time = time() - itime
        print(
            f"[E] {train_avgloss=:.6f} {train_hitrate=:.2f}% {test_time=:.2f}s"
        )

    # TODO: Think more about this mutation scheme.
    # Maybe a uniform random value in [c - d, c + d] with
    # c = (min + max) / 2 and d = max - min.
    @staticmethod
    def mutate(gen):
        return gen + randn()

    @classmethod
    def set_layers_description(cls, dlayers):
        """
        Override of NeuralNetwork method to make it static.
        dlayers will be used as a static attribute of GANeuralNetwork class
        """
        cls.dlayers = dlayers

    @classmethod
    def get_random_params(cls):
        return super().get_random_params_custom(cls)
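
A hypothetical driver for GaNet; the layer description and population size are placeholder values, not taken from the source:

# Assumed layer description: 784 inputs, one hidden layer, 10 outputs.
GaNet.set_layers_description([784, 30, 10])

# Random initial population, ranked by fitness (higher is better, since
# fitness is the negated batch cost).
population = [GaNet.create_random() for _ in range(20)]
population.sort(key=lambda net: net.fitness, reverse=True)
population[0].test()  # evaluate the fittest individual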
Example #5
from numpy import random

from network import network
from mnist import mnist as data

# Loading training and test sets
training_set, test_set = data.load()
print("Successfully loaded data. Initializing network...")

# Creating network
hidden_layers = [3]
x, y = training_set[0]
input_size = x.shape[0]
output_size = y.shape[0]

net = network(input_size, output_size, hidden_layers)
print("Successfully initialized network. Training network...")

# Training variables
learning_rate = 1
set_size = 150
num_cycles = 30

net.train(training_set, learning_rate, set_size, num_cycles)
print("Successfully trained network. Validating network...")

test_outputs = []

for x, _ in test_set:
    test_outputs.append(net.forwardpass(x))
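
The script collects raw outputs without scoring them. A hypothetical follow-up that scores them, assuming one-hot targets and that forwardpass returns a score vector (both assumptions about the network module):

import numpy as np

# Count predictions whose argmax matches the target's argmax
# (hypothetical follow-up, not part of the source script).
correct = sum(
    int(np.argmax(out) == np.argmax(y))
    for out, (x, y) in zip(test_outputs, test_set)
)
print(f"Validation accuracy: {correct / len(test_set):.2%}")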
Example #6
import numpy as np

import mnist  # MNIST loader used below

np.random.seed(1)  # TODO: Remove?
WTOL = 10  # weights must be within [-WTOL, +WTOL]
COMPLETENESS = 0.05  # ratio of loops that will be effectively tested
BATCH_SIZE = 1  # TESTMARK
DTYPE = np.dtype("float64")  # TESTMARK

# TODO: Needed for now, but there should be a cleaner way to set BATCH_SIZE
# and DTYPE globally.
assert BATCH_SIZE == 1 and DTYPE == np.dtype("float64"), (
    "Modify all .py[x] files to use BATCH_SIZE = 1 and float64 when testing. "
    "Search globally for TESTMARK to find where to apply the changes."
)

EPOCHS = mnist.load(batch_size=BATCH_SIZE)  # Epoch batches generator
BATCHES = next(EPOCHS)  # Epoch mini batches
BATCH = BATCHES[0]
# Single sample batch used for testing
INPUT = BATCH[0][0][np.newaxis, :]
TARGET = BATCH[1][0][np.newaxis, :]


class Loop:
    """Helper utility class to count test loops. Logs and skips loops."""

    def __init__(self, log_text, log_every, total_loops, cover_ratio) -> None:
        self.log_text = log_text
        self.log_every = log_every
        self.total_loops = total_loops
        self.cover_ratio = cover_ratio
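
Only the start of Loop survives in this excerpt. A hypothetical instantiation, reusing the COMPLETENESS constant from above (the logging/skipping behaviour itself is not shown):

# Hypothetical construction; only the constructor call is illustrated,
# since the rest of Loop is cut off in this excerpt.
loop = Loop(
    log_text="test loop",
    log_every=100,
    total_loops=10_000,
    cover_ratio=COMPLETENESS,
)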
Example #7
def main():
    net = CyNet(DLAYERS, BATCH_SIZE)
    trainbatches_gen = mnist.load("training", BATCH_SIZE, autoencoder=AUTOENCODER)
    testbatches_gen = mnist.load("testing", BATCH_SIZE, autoencoder=AUTOENCODER)
    train(net, trainbatches_gen, testbatches_gen)
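
main() relies on module-level constants defined elsewhere in the source. A hypothetical entry point with placeholder values (the DLAYERS, BATCH_SIZE, and AUTOENCODER values are assumptions, as is CyNet's availability):

# Placeholder values for illustration only; the real constants live
# elsewhere in the source repository.
DLAYERS = [784, 16, 16, 10]
BATCH_SIZE = 100
AUTOENCODER = False

if __name__ == "__main__":
    main()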