Example #1
0
def test_model_evaluate_method():
    """Exercise Model.evaluate with tuple data, a generator, and an invalid batch size."""
    model = Model()
    model.set_model(pytorch_model)
    model.compile(optimizer=Adam(), loss_function=MSELoss())
    TrainLogger("ignore/")  # side effect only; the instance is intentionally discarded
    generator = train_generator()

    # One quick epoch so the model is in a trained state before evaluating.
    model.fit(
        train_data=(X_train, y_train),
        validation_data=(X_validation, y_validation),
        epochs=1,
        batch_size=32,
    )

    # Tuple-based evaluation: default batch size, then an explicit one.
    for extra_kwargs in ({}, {"batch_size": 4}):
        model.evaluate(test_data=(X_test, y_test), **extra_kwargs)

    # Generator-based evaluation.
    # NOTE(review): 'tests_steps' looks like a typo for 'test_steps' — confirm
    # against the evaluate() signature before renaming.
    model.evaluate(test_data=generator, batch_size=4, tests_steps=4)

    # A batch size larger than the dataset should be rejected.
    with pytest.raises(ValueError):
        model.evaluate(test_data=(X_test, y_test), batch_size=400)
# Hold out everything after the train split as the validation set.
X_validation = X[int(len(X)*train_data_size):]
y_validation = y[int(len(y)*train_data_size):]

print("Size of Train data is", len(X_train))
print("Size of Validation data is ", len(X_validation))
print("Size of Test data is ", len(X_test))

# Making the model
# Initializing the PyTorch model
p_model = MNISTModel()

# Initializing a NeuralPy model
model = Model()

# Converting the PyTorch model to NeuralPy model
model.set_model(p_model)

# Printing the summary of the model
print(model.summary())

# Compiling the model
model.compile(optimizer=Adam(), loss_function=CrossEntropyLoss(), metrics=["accuracy"])

# Training the model
# Fix: the validation split must be passed as `validation_data`, not `test_data`,
# to match the fit() signature used elsewhere in this file.
history = model.fit(
    train_data=(X_train, y_train),
    validation_data=(X_validation, y_validation),
    epochs=10,
    batch_size=32,
)

# Evaluating the model
# Fix: evaluate() takes a `test_data=(X, y)` tuple (see every other evaluate()
# call in this file), not separate X=/y= keyword arguments.
ev = model.evaluate(test_data=(X_test, y_test), batch_size=32)

print(f"Loss: {ev['loss']} and accuracy: {ev['accuracy']}%")