# Example 1
# Sequential CNN for MNIST: three conv+ReLU blocks (128 -> 64 -> 32 filters),
# flattened into a 10-way Dense output layer.
model = Sequential()
model.add(Conv2D(input_shape=(1, 28, 28), filters=128, kernel_size=3))
model.add(ReLU())
for n_filters in (64, 32):
    model.add(Conv2D(filters=n_filters, kernel_size=3))
    model.add(ReLU())
model.add(Flatten())
model.add(Dense(n_nodes=10))

model.build()
model.compile(optimizer=SGD(),
              loss_function=CrossEntropyLoss(),
              metrics=["accuracy"])
print(model.summary())

# Download MNIST and pull a single 1000-sample batch as tensors.
mnist_transform = transforms.Compose([transforms.ToTensor()])
train_set = torchvision.datasets.MNIST(root='./data',
                                       train=True,
                                       download=True,
                                       transform=mnist_transform)
train_loader = torch.utils.data.DataLoader(train_set, batch_size=1000)
mnist_data = next(iter(train_loader))
# Split into train and test set
# Example 2
# MLP for flattened MNIST: 784 -> 64 (ReLU + Dropout) -> 10.
model = Sequential()
model.add(Dense(n_nodes=64, n_inputs=784))
model.add(ReLU())
model.add(Dropout())
model.add(Dense(n_nodes=10))

model.build()

# Adam configured with its standard defaults spelled out explicitly.
optimizer = Adam(learning_rate=0.001,
                 betas=(0.9, 0.999),
                 eps=1e-08,
                 weight_decay=0.0,
                 amsgrad=False)
model.compile(optimizer=optimizer,
              loss_function=CrossEntropyLoss(),
              metrics=["accuracy"])

print(model.summary())

# Load each MNIST CSV split, shuffle it, and keep the raw numpy values.
train_data = pd.read_csv("./data/mnist_train.csv").sample(frac=1).values
test_data = pd.read_csv("./data/mnist_test.csv").sample(frac=1).values
# Example 3
# Synthetic regression data: inputs uniform in [0, 10), targets are the
# input plus uniform noise in [0, 5).
X_train = np.random.rand(100, 1) * 10
y_train = X_train + 5 * np.random.rand(100, 1)

X_validation = np.random.rand(100, 1) * 10
y_validation = X_validation + 5 * np.random.rand(100, 1)

X_test = np.random.rand(10, 1) * 10
y_test = X_test + 5 * np.random.rand(10, 1)

# Single linear unit fitted with Adam on mean-squared error.
model = Sequential()
model.add(Dense(n_nodes=1, n_inputs=1))
model.build()
model.compile(optimizer=Adam(), loss_function=MSELoss())
model.summary()

# Train with a validation split, then score the held-out test points.
history = model.fit(train_data=(X_train, y_train),
                    validation_data=(X_validation, y_validation),
                    epochs=300,
                    batch_size=4)
evaluated = model.evaluate(test_data=(X_test, y_test), batch_size=4)
print(evaluated)
# Example 4
# Small MLP over 3 binary inputs: Dense(1) -> ReLU -> Dense(2) -> ReLU
# -> Dense(1) -> ReLU.
model = Sequential()
layers = (Dense(n_nodes=1, n_inputs=3), ReLU(),
          Dense(n_nodes=2), ReLU(),
          Dense(n_nodes=1), ReLU())
for layer in layers:
    model.add(layer)

# Building the Model
model.build()

# Compiling
model.compile(optimizer=Adam(), loss_function=MSELoss(), metrics=["accuracy"])
print(model.summary())

# XOR-style truth table; the constant third column acts as a bias input.
x_train = np.array([[0, 0, 1], [0, 1, 1], [1, 0, 1]])
x_test = np.array([[1, 1, 1]])
y_train = np.array([[0], [1], [1]], dtype=np.float32)
y_test = np.array([[0]], dtype=np.float32)

# Fit on the three training rows, then predict the held-out row.
model.fit(train_data=(x_train, y_train), epochs=20, batch_size=1)
print(model.predict(x_test[0]))