Exemplo n.º 1
0
def test_add_method():
    """A built model is frozen: add() after build() must raise an Exception."""
    net = Sequential()
    net.add(Dense(n_nodes=32, n_inputs=45))
    net.build()

    # Any further layer addition on a finalized model is rejected.
    with pytest.raises(Exception):
        net.add(Dense(n_nodes=32, n_inputs=45))
Exemplo n.º 2
0
from neuralpy.models import Sequential
from neuralpy.layers.convolutional import Conv2D
from neuralpy.layers.linear import Dense
from neuralpy.layers.other import Flatten
from neuralpy.layers.activation_functions import ReLU, Softmax
from neuralpy.loss_functions import CrossEntropyLoss
from neuralpy.optimizer import SGD
import torch
import torchvision
from torchvision import datasets, transforms

# Create a Sequential model instance
model = Sequential()

# Network: three shrinking conv blocks (128 -> 64 -> 32 filters, each
# followed by ReLU), then flatten into a 10-way dense classifier head.
model.add(Conv2D(input_shape=(1, 28, 28), filters=128, kernel_size=3))
model.add(ReLU())
for n_filters in (64, 32):
    model.add(Conv2D(filters=n_filters, kernel_size=3))
    model.add(ReLU())
model.add(Flatten())
model.add(Dense(n_nodes=10))

model.build()

# Plain SGD with cross-entropy; track accuracy during training.
model.compile(optimizer=SGD(),
              loss_function=CrossEntropyLoss(),
              metrics=["accuracy"])
print(model.summary())

#Get the MNIST dataset
Exemplo n.º 3
0
# Deterministic data generation
np.random.seed(1969)


def _linear_noise_split(size):
    """Draw `size` inputs x in [0, 10) and targets y = x + noise in [0, 5).

    The two np.random.rand calls happen in the same order as the original
    inline code, so the seeded sequence of values is unchanged.
    """
    features = np.random.rand(size, 1) * 10
    targets = features + 5 * np.random.rand(size, 1)
    return features, targets


X_train, y_train = _linear_noise_split(100)
X_validation, y_validation = _linear_noise_split(100)
X_test, y_test = _linear_noise_split(10)

# Single-neuron linear regressor: y ~ w*x + b
model = Sequential()
model.add(Dense(n_nodes=1, n_inputs=1))

model.build()

# MSE is the natural loss for this regression setup.
model.compile(optimizer=Adam(), loss_function=MSELoss())

model.summary()

# Train for 300 epochs in mini-batches of 4, validating each epoch.
history = model.fit(train_data=(X_train, y_train),
                    validation_data=(X_validation, y_validation),
                    epochs=300, batch_size=4)

# Predicting some values
Exemplo n.º 4
0
# Dependencies
from neuralpy.models import Sequential
from neuralpy.layers import Dense
from neuralpy.regularizers import Dropout
from neuralpy.activation_functions import ReLU
from neuralpy.loss_functions import CrossEntropyLoss
from neuralpy.optimizer import Adam

import pandas as pd
import numpy as np

# MLP classifier: 784 -> 64 (ReLU + dropout) -> 10
model = Sequential()

for layer in (Dense(n_nodes=64, n_inputs=784),
              ReLU(),
              Dropout(),
              Dense(n_nodes=10)):
    model.add(layer)

model.build()

# Adam configured with its defaults spelled out explicitly.
optimizer = Adam(learning_rate=0.001,
                 betas=(0.9, 0.999),
                 eps=1e-08,
                 weight_decay=0.0,
                 amsgrad=False)
model.compile(optimizer=optimizer,
              loss_function=CrossEntropyLoss(),
              metrics=["accuracy"])
Exemplo n.º 5
0
import pytest
import numpy as np

np.random.seed(1969)


def _noisy_line(count):
    """Inputs x in [0, 10) with targets y = x + noise in [0, 5).

    Uses the same two np.random.rand calls, in the same order, as the
    original inline code, so the seeded value stream is identical.
    """
    xs = np.random.rand(count, 1) * 10
    return xs, xs + 5 * np.random.rand(count, 1)


X_train, y_train = _noisy_line(100)
X_validation, y_validation = _noisy_line(100)
X_test, y_test = _noisy_line(10)

# Minimal one-neuron linear model.
model = Sequential()
model.add(Dense(n_nodes=1, n_inputs=1))

model.build()

# Expose the underlying PyTorch module for direct use.
pytorch_model = model.get_model()


def train_generator():
    """Yield 40 synthetic (X, y) batches.

    Each batch is a 40x1 array of inputs in [0, 10) paired with targets
    y = X + noise, noise uniform in [0, 5).
    """
    for _ in range(40):
        features = np.random.rand(40, 1) * 10
        labels = features + 5 * np.random.rand(40, 1)
        yield features, labels

Exemplo n.º 6
0
from neuralpy.models import Sequential
from neuralpy.layers.linear import Dense
from neuralpy.layers.convolutional import Conv2D
from neuralpy.layers.activation_functions import ReLU,Softmax
from neuralpy.layers.pooling import MaxPool2D
from neuralpy.layers.other import Flatten
from neuralpy.loss_functions import CrossEntropyLoss
from neuralpy.optimizer import SGD,Adam

import torch
import torchvision.datasets as datasets
import torchvision.transforms as transforms

# Create a Sequential model instance
model = Sequential()

# AlexNet-style feature extractor for 1x224x224 input.
# NOTE(review): a ReLU directly after each of the first two MaxPool2D layers
# is unusual; reproduced as-is — confirm against the original intent.
feature_layers = [
    Conv2D(input_shape=(1,224,224), filters=96, kernel_size=11, stride=4),
    ReLU(),
    MaxPool2D(kernel_size=3, stride=2),
    ReLU(),
    Conv2D(filters=256, kernel_size=5, stride=1, padding=2),
    ReLU(),
    MaxPool2D(kernel_size=3, stride=2),
    ReLU(),
    Conv2D(filters=384, kernel_size=3, stride=1, padding=1),
    ReLU(),
    Conv2D(filters=384, kernel_size=3, stride=1, padding=1),
    ReLU(),
    Conv2D(filters=256, kernel_size=3, stride=1, padding=1),
    ReLU(),
    MaxPool2D(kernel_size=3, stride=2),
]
for layer in feature_layers:
    model.add(layer)
Exemplo n.º 7
0
# Dependencies
from neuralpy.models import Sequential
from neuralpy.layers.linear import Dense
from neuralpy.layers.regularizers import Dropout
from neuralpy.layers.activation_functions import ReLU
from neuralpy.loss_functions import CrossEntropyLoss
from neuralpy.optimizer import Adam

import pandas as pd
import numpy as np

# MLP classifier: 784 -> 264 (ReLU + dropout) -> 10
model = Sequential()

for layer in (Dense(n_nodes=264, n_inputs=784),
              ReLU(),
              Dropout(),
              Dense(n_nodes=10)):
    model.add(layer)

model.build()

# Adam with library defaults; cross-entropy for multi-class output.
model.compile(optimizer=Adam(),
              loss_function=CrossEntropyLoss(),
              metrics=["accuracy"])

print(model.summary())

# Reading data
train_data = pd.read_csv("./data/mnist_train.csv", header=None)
Exemplo n.º 8
0
from neuralpy.models import Sequential
from neuralpy.layers.linear import Dense
from neuralpy.layers.activation_functions import ReLU
from neuralpy.loss_functions import MSELoss
from neuralpy.optimizer import Adam
import numpy as np


# Build a small artificial neural network for 3-input XOR logic.
# NOTE(review): the topology is 3 -> 1 -> 2 -> 1; the single-node first
# layer is a severe bottleneck for XOR — reproduced as-is, confirm intent.
model = Sequential()
layer_plan = [
    Dense(n_nodes=1, n_inputs=3), ReLU(),
    Dense(n_nodes=2), ReLU(),
    Dense(n_nodes=1), ReLU(),
]
for layer in layer_plan:
    model.add(layer)

# Finalize the layer stack.
model.build()

# Adam + MSE, tracking accuracy.
model.compile(optimizer=Adam(), loss_function=MSELoss(), metrics=["accuracy"])
print(model.summary())

# Data for XOR