def test_adam_get_layer_method(learning_rate, beta, eps, weight_decay, amsgrad):
    """Adam.get_optimizer() must expose the wrapped optimizer class and the
    keyword arguments it was constructed with.

    Parameters come from a pytest parametrization defined elsewhere in the
    test module.
    """
    x = Adam(
        learning_rate=learning_rate,
        betas=beta,
        eps=eps,
        weight_decay=weight_decay,
        amsgrad=amsgrad,
    )
    details = x.get_optimizer()

    # Assert truthiness directly instead of comparing to True (PEP 8 / E712).
    assert isinstance(details, dict)
    assert issubclass(details["optimizer"], _Adam)
    assert isinstance(details["keyword_arguments"], dict)

    # Every constructor argument must round-trip into the kwargs dict,
    # with learning_rate mapped to torch's "lr" key.
    assert details["keyword_arguments"]["lr"] == learning_rate
    assert details["keyword_arguments"]["betas"] == beta
    assert details["keyword_arguments"]["eps"] == eps
    assert details["keyword_arguments"]["weight_decay"] == weight_decay
    assert details["keyword_arguments"]["amsgrad"] == amsgrad
def test_adam_should_throw_value_error(learning_rate, beta, eps, weight_decay, amsgrad):
    """Invalid hyper-parameters must make the Adam wrapper raise ValueError.

    Parameters come from a pytest parametrization (elsewhere in the module)
    that supplies out-of-range / wrongly-typed values.
    """
    # The original bound the constructor result to an unused `x` and the
    # exception info to an unused `ex`; both bindings are dropped.
    with pytest.raises(ValueError):
        Adam(
            learning_rate=learning_rate,
            betas=beta,
            eps=eps,
            weight_decay=weight_decay,
            amsgrad=amsgrad,
        )
def test_model_set_model_method():
    """set_model accepts a real torch module and rejects None."""
    # Happy path: attach a model, then compile it.
    model = Model()
    model.set_model(pytorch_model)
    model.compile(optimizer=Adam(), loss_function=MSELoss())

    # Passing None as the underlying model must be rejected.
    with pytest.raises(ValueError):
        fresh = Model()
        fresh.set_model(None)
def test_model_summary_method():
    """summary() works on a compiled model and raises before set_model/compile."""
    # Happy path: a fully compiled model can be summarized.
    model = Model()
    model.set_model(pytorch_model)
    model.compile(optimizer=Adam(), loss_function=MSELoss())
    model.summary()

    # Summarizing an empty Model must raise.
    with pytest.raises(Exception):
        empty = Model()
        empty.summary()
def test_models_compile_method():
    """compile succeeds with defaults and rejects invalid metrics arguments."""
    # Happy path: no metrics.
    model = Model()
    model.set_model(pytorch_model)
    model.compile(optimizer=Adam(), loss_function=MSELoss())

    # An unknown metric name and a non-list metrics value are both invalid;
    # each attempt uses a fresh Model, mirroring independent compile calls.
    for bad_metrics in (["test"], "test"):
        with pytest.raises(ValueError):
            fresh = Model()
            fresh.set_model(pytorch_model)
            fresh.compile(
                optimizer=Adam(),
                loss_function=MSELoss(),
                metrics=bad_metrics,
            )
def test_model_save_for_inference_method():
    """save_for_inference rejects non-string/empty paths and accepts a real one."""
    model = Model()
    model.set_model(pytorch_model)
    model.compile(optimizer=Adam(), loss_function=MSELoss())

    # A non-string path and an empty path are both invalid.
    for bad_path in (123, ""):
        with pytest.raises(ValueError):
            model.save_for_inference(bad_path)

    # A proper path succeeds (written under the ignored test directory).
    model.save_for_inference("ignore/test.npy")
def test_model_evaluate_method():
    """evaluate works with arrays and generators and rejects oversized batches."""
    model = Model()
    model.set_model(pytorch_model)
    model.compile(optimizer=Adam(), loss_function=MSELoss())

    # Instantiated for its side effects only (original discarded it too).
    TrainLogger("ignore/")
    test_gen = train_generator()

    # Train briefly so evaluation has fitted weights to run against.
    model.fit(
        train_data=(X_train, y_train),
        validation_data=(X_validation, y_validation),
        epochs=1,
        batch_size=32,
    )

    # Array data: default batch size, then an explicit one.
    model.evaluate(test_data=(X_test, y_test))
    model.evaluate(test_data=(X_test, y_test), batch_size=4)

    # Generator data with a step count.
    # NOTE(review): keyword is `tests_steps` in the original — looks like a
    # possible typo for `test_steps`; confirm against Model.evaluate's signature.
    model.evaluate(test_data=test_gen, batch_size=4, tests_steps=4)

    # A batch size larger than the test set must be rejected.
    with pytest.raises(ValueError):
        model.evaluate(test_data=(X_test, y_test), batch_size=400)
# Synthetic linear-regression data: y = x + noise, x in [0, 10).
X_train = np.random.rand(100, 1) * 10
y_train = X_train + 5 * np.random.rand(100, 1)
X_validation = np.random.rand(100, 1) * 10
y_validation = X_validation + 5 * np.random.rand(100, 1)
X_test = np.random.rand(10, 1) * 10
y_test = X_test + 5 * np.random.rand(10, 1)

# Single-neuron model: one Dense layer, one input feature.
model = Sequential()
model.add(Dense(n_nodes=1, n_inputs=1))
model.build()
model.compile(optimizer=Adam(), loss_function=MSELoss())
model.summary()

# Train, then evaluate on the held-out test set.
history = model.fit(
    train_data=(X_train, y_train),
    validation_data=(X_validation, y_validation),
    epochs=300,
    batch_size=4,
)
evaluated = model.evaluate(test_data=(X_test, y_test), batch_size=4)
print(evaluated)
# MNIST classifier: 784 -> 64 (ReLU, Dropout) -> 10, explicit Adam settings.
model = Sequential()
for layer in (
    Dense(n_nodes=64, n_inputs=784),
    ReLU(),
    Dropout(),
    Dense(n_nodes=10),
):
    model.add(layer)
model.build()
model.compile(
    optimizer=Adam(
        learning_rate=0.001,
        betas=(0.9, 0.999),
        eps=1e-08,
        weight_decay=0.0,
        amsgrad=False,
    ),
    loss_function=CrossEntropyLoss(),
    metrics=["accuracy"],
)
print(model.summary())

# Load MNIST CSVs; shuffle the training set and convert it to a raw array.
train_data = pd.read_csv("./data/mnist_train.csv")
test_data = pd.read_csv("./data/mnist_test.csv")
train_data = train_data.sample(frac=1)
train_data = train_data.values
test_data = test_data.sample(frac=1)
def test_model_fit_method():
    """Exercise Model.fit happy paths and its argument validation."""
    model = Model()
    model.set_model(pytorch_model)
    model.compile(optimizer=Adam(), loss_function=MSELoss())
    logger = TrainLogger("ignore/")

    # In-memory (array) data: first without callbacks, then with the logger.
    for extra in ({}, {"callbacks": [logger]}):
        model.fit(
            train_data=(X_train, y_train),
            validation_data=(X_validation, y_validation),
            epochs=1,
            batch_size=32,
            **extra,
        )

    # Generator data: the same generator pair serves both fit calls,
    # first without callbacks, then with the logger.
    train_gen = train_generator()
    validation_gen = train_generator()
    for extra in ({}, {"callbacks": [logger]}):
        model.fit(
            train_data=train_gen,
            validation_data=validation_gen,
            epochs=1,
            batch_size=4,
            steps_per_epoch=5,
            validation_steps=5,
            **extra,
        )

    # Array-data validation failures, in the original order:
    # oversized batch, negative epochs, negative batch size, bad callbacks.
    bad_array_cases = (
        dict(train_data=(X_train, y_train), epochs=1, batch_size=1024),
        dict(train_data=(X_train, y_train[:-1]), epochs=-20, batch_size=1024),
        dict(train_data=(X_train, y_train[:-1]), epochs=1, batch_size=-10),
        dict(
            train_data=(X_train, y_train[:-1]),
            epochs=1,
            batch_size=32,
            callbacks="test",
        ),
    )
    for case in bad_array_cases:
        with pytest.raises(ValueError):
            model.fit(
                validation_data=(X_validation, y_validation),
                **case,
            )

    # Generator-data step-count validation; fresh generators per attempt,
    # matching the original's re-creation inside each raises block.
    bad_step_cases = (
        {"steps_per_epoch": -123, "validation_steps": 5},
        {"steps_per_epoch": "test", "validation_steps": 5},
        {"steps_per_epoch": 5, "validation_steps": -23},
        {"steps_per_epoch": 5, "validation_steps": "asd"},
    )
    for steps in bad_step_cases:
        with pytest.raises(ValueError):
            model.fit(
                train_data=train_generator(),
                validation_data=train_generator(),
                epochs=1,
                batch_size=32,
                **steps,
            )
# Remaining layers of a convolutional classifier (the `model` object and its
# earlier layers are defined above this chunk).
for layer in (
    Conv2D(filters=384, kernel_size=3, stride=1, padding=1),
    ReLU(),
    Conv2D(filters=256, kernel_size=3, stride=1, padding=1),
    ReLU(),
    MaxPool2D(kernel_size=3, stride=2),
    ReLU(),
    Flatten(),
    Dense(n_nodes=4096),
    ReLU(),
    Dense(n_nodes=4096),
    ReLU(),
    Dense(n_nodes=10),
    Softmax(),
):
    model.add(layer)

model.build()
model.compile(
    optimizer=Adam(),
    loss_function=CrossEntropyLoss(),
    metrics=["accuracy"],
)
print(model.summary())

# MNIST training data, center-cropped to 224x224 and converted to tensors.
train_set = datasets.MNIST(
    root='./data',
    train=True,
    download=True,
    transform=transforms.Compose([
        transforms.CenterCrop(224),
        transforms.ToTensor(),
    ]),
)

# Batch the dataset with PyTorch's DataLoader.
train_loader = torch.utils.data.DataLoader(train_set, batch_size=1000)
import pandas as pd
import numpy as np

# MNIST classifier: 784 -> 264 (ReLU, Dropout) -> 10 with default Adam.
model = Sequential()
for layer in (
    Dense(n_nodes=264, n_inputs=784),
    ReLU(),
    Dropout(),
    Dense(n_nodes=10),
):
    model.add(layer)
model.build()
model.compile(
    optimizer=Adam(),
    loss_function=CrossEntropyLoss(),
    metrics=["accuracy"],
)
print(model.summary())

# Headerless MNIST CSVs; shuffle both splits and convert to raw arrays.
train_data = pd.read_csv("./data/mnist_train.csv", header=None)
test_data = pd.read_csv("./data/mnist_test.csv", header=None)
train_data = train_data.sample(frac=1)
train_data = train_data.values
test_data = test_data.sample(frac=1)
test_data = test_data.values
''' model = Sequential() model.add(Dense(n_nodes=1, n_inputs=3)) model.add(ReLU()) model.add(Dense(n_nodes=2)) model.add(ReLU()) model.add(Dense(n_nodes=1)) model.add(ReLU()) # Building the Model model.build() # Compiling model.compile(optimizer=Adam(), loss_function=MSELoss(), metrics=["accuracy"]) print(model.summary()) # Data for XOR x_train = np.array([[0, 0, 1], [0, 1, 1], [1, 0, 1]]) x_test = np.array([[1, 1, 1]]) y_train = np.array([[0], [1], [1]], dtype=np.float32) y_test = np.array([[0]], dtype=np.float32) # Training the model model.fit(train_data=(x_train, y_train), epochs=20, batch_size=1) # Prediction print(model.predict(x_test[0]))