Example no. 1
0
# Finish assembling the network (earlier layers are defined above):
# flatten the image tensor, then map to 10 class scores.
model.add(Flatten())
model.add(Dense(n_nodes=10))

model.build()
model.compile(optimizer=SGD(),
              loss_function=CrossEntropyLoss(),
              metrics=["accuracy"])
print(model.summary())

# Download MNIST and wrap it in a DataLoader so a single 1000-sample
# batch can be pulled out to work with.
mnist_transform = transforms.Compose([transforms.ToTensor()])
train_set = torchvision.datasets.MNIST(root='./data',
                                       train=True,
                                       download=True,
                                       transform=mnist_transform)
train_loader = torch.utils.data.DataLoader(train_set, batch_size=1000)

# Grab one batch and split it in half: first 500 samples for training,
# the remaining 500 for validation.
images, labels = next(iter(train_loader))
train_imgs, test_imgs = images[:500], images[500:]
train_labels, test_labels = labels[:500], labels[500:]

# Train for 10 epochs with mini-batches of 10 samples.
model.fit(train_data=(train_imgs, train_labels),
          validation_data=(test_imgs, test_labels),
          epochs=10,
          batch_size=10)
Example no. 2
0
# NOTE(review): `test_data` is expected to be a pandas DataFrame and
# `train_data` an already-converted numpy array (both loaded earlier in
# the file) — confirm against the omitted lines above.
# Shuffle the test rows, then drop down to a raw numpy array.
test_data = test_data.sample(frac=1)
test_data = test_data.values

# Column 0 holds the label; the remaining columns are pixel values.
# Scale the pixels into [0, 1].
X = train_data[:, 1:] / 255.
y = train_data[:, 0]

# BUG FIX: the test features were previously left unscaled while the
# training features were divided by 255, so evaluation ran on inputs
# from a completely different range. Apply the same scaling here.
X_test = test_data[:, 1:] / 255.
y_test = test_data[:, 0]

# Free the raw training array; only X/y are needed from here on.
del train_data

n = len(X)

# 80/20 split of the training pool into train and validation sets.
X_train = X[:int(n * 0.8)]
y_train = y[:int(n * 0.8)]

X_validation = X[int(n * 0.8):]
y_validation = y[int(n * 0.8):]

del X, y

model.fit(train_data=(X_train, y_train),
          validation_data=(X_validation, y_validation),
          epochs=5,
          batch_size=32)

# Score the held-out (shuffled) test set and report the metrics.
ev = model.evaluate(X=X_test, y=y_test, batch_size=32)

print(ev)
Example no. 3
0
# Build a toy linear-regression problem: targets follow y = x + noise,
# with x uniform in [0, 10) and noise uniform in [0, 5).
X_train = np.random.rand(100, 1) * 10
y_train = X_train + 5 * np.random.rand(100, 1)

X_validation = np.random.rand(100, 1) * 10
y_validation = X_validation + 5 * np.random.rand(100, 1)

X_test = np.random.rand(10, 1) * 10
y_test = X_test + 5 * np.random.rand(10, 1)

# A single linear unit (1 input -> 1 output) is enough for this fit.
model = Sequential()
model.add(Dense(n_nodes=1, n_inputs=1))
model.build()
model.compile(optimizer=Adam(), loss_function=MSELoss())
model.summary()

# Fit on the training split, tracking the validation split each epoch.
history = model.fit(train_data=(X_train, y_train),
                    validation_data=(X_validation, y_validation),
                    epochs=300,
                    batch_size=4)

# Score the held-out test points and report the result.
evaluated = model.evaluate(test_data=(X_test, y_test), batch_size=4)
print(evaluated)
Example no. 4
0
'''

# A tiny MLP for the XOR-style task: 3 inputs -> 1 -> 2 -> 1 nodes,
# each Dense layer followed by a ReLU activation.
model = Sequential()
for layer in (Dense(n_nodes=1, n_inputs=3), ReLU(),
              Dense(n_nodes=2), ReLU(),
              Dense(n_nodes=1), ReLU()):
    model.add(layer)

model.build()
model.compile(optimizer=Adam(), loss_function=MSELoss(), metrics=["accuracy"])
print(model.summary())

# Three of the four XOR rows for training (the third input column is a
# constant bias term); the remaining row is held out for prediction.
x_train = np.array([[0, 0, 1], [0, 1, 1], [1, 0, 1]])
x_test = np.array([[1, 1, 1]])
y_train = np.array([[0], [1], [1]], dtype=np.float32)
y_test = np.array([[0]], dtype=np.float32)

model.fit(train_data=(x_train, y_train), epochs=20, batch_size=1)

# Predict the single held-out sample.
print(model.predict(x_test[0]))