Example #1
def predict_flower(data: Iris):
    data = data.dict()
    print(data)
    print('Hello')
    petallength = data['petallength']
    sepallength = data['sepallength']
    sepalwidth = data['sepalwidth']
    petalwidth = data['petalwidth']

    # print(model.predict([[petallength, sepallength, sepalwidth, petalwidth]]))
    print('Hello')
    prediction = model.predict(
        [[petallength, sepallength, sepalwidth, petalwidth]])

    print("prediction value", prediction[0])
    if (prediction[0] == 0):
        prediction = 'This flower is iris-setosa'
    elif (prediction[0] == 1):
        prediction = 'This flower is iris versicolor'
    else:
        prediction = 'This flower is iris virginica'

    return {'prediction': prediction}
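
The handler above appears to be a FastAPI route: data.dict() implies a Pydantic request model, and model is a classifier trained elsewhere. A minimal sketch of that assumed surrounding setup (the route path, field names, and model file are guesses, not taken from the example):

from fastapi import FastAPI
from pydantic import BaseModel
import joblib

app = FastAPI()
# hypothetical path to a pre-trained scikit-learn classifier
model = joblib.load('iris_model.pkl')

class Iris(BaseModel):
    sepallength: float
    sepalwidth: float
    petallength: float
    petalwidth: float

@app.post('/predict')
def predict_flower(data: Iris):
    ...  # body as in the example above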
Example #2
import pandas as pd

from Cluster import Cluster
from Iris import Iris
from Universe import Univervse

irises = pd.read_csv('../Resources/iris.txt', sep=',', header=None)
irises.columns = ['x1', 'y1', 'x2', 'y2', 'name']

clusters_names = irises.name.unique()

irises_ = [
    Iris(row['x1'], row['y1'], row['x2'], row['y2'], row['name'])
    for _, row in irises.iterrows()
]

cluster_1 = Cluster()
cluster_2 = Cluster()
cluster_3 = Cluster()

for element in irises_:

    if element.name == clusters_names[0]:
        cluster_1.add(element)

    if element.name == clusters_names[1]:
        cluster_2.add(element)

    if element.name == clusters_names[2]:
        cluster_3.add(element)
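
The Iris and Cluster classes imported here are project-specific and not shown. A minimal sketch consistent with how they are used above (attribute and method names mirror the calls; everything else is an assumption):

class Iris:
    def __init__(self, x1, y1, x2, y2, name):
        self.x1, self.y1, self.x2, self.y2 = x1, y1, x2, y2
        self.name = name


class Cluster:
    def __init__(self):
        self.elements = []

    def add(self, iris):
        self.elements.append(iris)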
Example #3
File: Main.py  Project: PWNsbey/cs450
from sklearn import datasets
from DataWizard import Wizard
from Iris import Iris as Iris

dataset = datasets.load_iris()

print("Dataset values:")

# Show the data (the attributes of each example)
print(dataset.data)

# Show the target values (in numeric format) of each example
print(dataset.target)

# Show the actual target names that correspond to each number
print(dataset.target_names)

print("++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++")

# organize and get the data back from the Wizard
discreteRefList = [4, 4, 4, 4]
wizard = Wizard(dataset, discreteRefList)
workingDatasets = wizard.organize_data()

iris = Iris(discreteRefList, wizard)
iris.train(workingDatasets[0])

iris.printTree()

iris.categorize(workingDatasets[1])
print( "\n\n\n", str(iris.accuracy) + "% correct")
Example #4
from Iris import Iris
from Histogram import Histogram

iris = Iris()
attributes = ["Sepal Length", "Sepal Width", "Petal Length", "Petal Width"]

for attr in attributes:
    print("Average " + attr + ": " + str(iris.calculate_average(attr)))
    print("Median " + attr + ": " + str(iris.calculate_median(attr)))
    print("Minimum " + attr + ": " + str(iris.get_min_range(attr)))
    print("First Quartile " + attr + ": " + str(iris.calculate_25(attr)))
    print("Third Quartile " + attr + ": " + str(iris.calculate_75(attr)))
    print("Maximum " + attr + ": " + str(iris.get_max_range(attr)))
    print("Variance " + attr + ": " + str(iris.calculate_variance(attr)) +
          "\n")

    histogram = Histogram(iris, attr)
    histogram.setup_data()
    histogram.build_histogram()
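
Iris and Histogram here are again project classes that are not shown. A minimal sketch of an Iris class exposing these statistics methods, backed by pandas (the column names and data file are assumptions; only the method names come from the example):

import pandas as pd


class Iris:
    def __init__(self, path='iris.csv'):  # hypothetical data file
        self.df = pd.read_csv(path, header=None,
                              names=["Sepal Length", "Sepal Width",
                                     "Petal Length", "Petal Width", "Class"])

    def calculate_average(self, attr):
        return self.df[attr].mean()

    def calculate_median(self, attr):
        return self.df[attr].median()

    def get_min_range(self, attr):
        return self.df[attr].min()

    def get_max_range(self, attr):
        return self.df[attr].max()

    def calculate_25(self, attr):
        return self.df[attr].quantile(0.25)

    def calculate_75(self, attr):
        return self.df[attr].quantile(0.75)

    def calculate_variance(self, attr):
        return self.df[attr].var()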
Example #5
File: Main.py  Project: PWNsbey/cs450
from sklearn import datasets
from DataWizard import Wizard
from Iris import Iris as Iris

dataset = datasets.load_iris()

print("Dataset values:")

# Show the data (the attributes of each example)
print(dataset.data)


# Show the target values (in numeric format) of each example
print(dataset.target)

# Show the actual target names that correspond to each number
print(dataset.target_names)

print("++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++")

# organize and get the data back from the Wizard
wizard = Wizard(dataset)
workingDatasets = wizard.organize_data()

nodeLayersArray = [3]  # LAST NEURON LAYER SHOULD BE DETERMINED BY NUMBER OF POSSIBLE TARGETS
Iris = Iris()
Iris.train(workingDatasets[0], nodeLayersArray)
Iris.test(workingDatasets[1])
Iris.printNetwork(Iris.neuralNetwork)
Example #6
from nearest_neighbors_preprocess import normalizeData
from nearest_neighbors_alg import runNN
from Iris import Iris

testing_data = [
    Iris(4.9, 3.0, 1.4, .2, ''),
    Iris(4.9, 2.4, 3.3, 1.0, ''),
    Iris(4.9, 2.5, 4.5, 1.7, '')
]

iris_data = []
max = []
min = []
iris_file = open("iris.txt", "r")

for line in iris_file:
    split_line = line.split(',')
    iris = Iris(split_line[0], split_line[1], split_line[2], split_line[3],
                split_line[4])
    iris_data.append(iris)

iris_data, max, min = normalizeData(iris_data)

for i in range(3):
    testing_data[i].sepal_length = (testing_data[i].sepal_length -
                                    min[0]) / (max[0] - min[0])
    testing_data[i].sepal_width = (testing_data[i].sepal_width -
                                   min[1]) / (max[1] - min[1])
    testing_data[i].pedal_length = (testing_data[i].pedal_length -
                                    min[2]) / (max[2] - min[2])
    testing_data[i].pedal_width = (testing_data[i].pedal_width -
                                   min[3]) / (max[3] - min[3])
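
normalizeData is not shown; judging by how its return values are used, it most likely performs min-max scaling per attribute and returns the scaled records together with the per-attribute maxima and minima. A possible sketch (attribute names follow the snippet, including the pedal_* spelling; the implementation itself is an assumption):

def normalizeData(iris_data):
    features = ['sepal_length', 'sepal_width', 'pedal_length', 'pedal_width']
    maxima = [max(float(getattr(iris, f)) for iris in iris_data) for f in features]
    minima = [min(float(getattr(iris, f)) for iris in iris_data) for f in features]
    for iris in iris_data:
        for i, f in enumerate(features):
            scaled = (float(getattr(iris, f)) - minima[i]) / (maxima[i] - minima[i])
            setattr(iris, f, scaled)
    return iris_data, maxima, minima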
Example #7
from sklearn import datasets
from DataWizard import Wizard
from Iris import Iris as Iris

dataset = datasets.load_iris()

print("Dataset values:")

# Show the data (the attributes of each example)
print(dataset.data)

# Show the target values (in numeric format) of each example
print(dataset.target)

# Show the actual target names that correspond to each number
print(dataset.target_names)

print("++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++")

# organize and get the data back from the Wizard
discreteRefList = [4, 4, 4, 4]
wizard = Wizard(dataset, discreteRefList)
workingDatasets = wizard.organize_data()

iris = Iris(discreteRefList, wizard)
iris.train(workingDatasets[0])

iris.printTree()

iris.categorize(workingDatasets[1])
print("\n\n\n", str(iris.accuracy) + "% correct")
Example #8
File: main.py  Project: pwnsbey/cs450
print(irisDataset.data)

# Show the target values (in numeric format) of each instance
print(irisDataset.target)

# Show the actual target names that correspond to each number
print(irisDataset.target_names)

print("++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++")

# organize and get the data back from the Organizer
workingDatasets = Organizer(irisDataset).organize_data()

# printing the shuffled sets
for i in range(len(workingDatasets[0])):
    print("Training set: ", workingDatasets[0][i].target)
print("-------------------------")
for i in range(len(workingDatasets[1])):
    print("Testing set: ", workingDatasets[1][i].target)
print("--------------------------------------------------------------")

# IT'S ALIIIIIIVE
Iris = Iris()

# Train that thang
Iris.train(workingDatasets[0])

# Run the predictions. Test mercilessly.
results = Iris.predict(workingDatasets[1], 4)

print(Iris.percent_correct(), "percent accuracy. Git gud scrub.")
Example #9
    return np.array(result)


if __name__ == '__main__':
    k = 2
    try:
        k = int(input('Enter the number of clusters: '))
    except ValueError:
        print('Invalid input')
    file = os.path.join(os.path.join(os.getcwd(), 'iris'), 'train.txt')
    iris_data = []
    with open(file) as f:
        for line in f:
            data = line.split(',')
            data[-1] = data[-1].replace('\n', '')
            element = Iris([float(i) for i in data[:-1]], data[-1])
            iris_data.append(element)

    clusters = []
    for i in range(k):
        clusters.append(Cluster('cluster{}'.format(i)))

    # assign random values
    for el in iris_data:
        j = randint(0, k - 1)
        clusters[j].append_el(el)
    # print(clusters)

    old_centroids = np.array([c.get_centroid() for c in clusters])
    # clusters_copy = clusters
    new_centroids = iteration(clusters, iris_data)
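
The Cluster class used above is not shown. A minimal sketch matching the calls append_el and get_centroid, assuming each Iris stores its numeric attributes in a list (the attribute name features is a guess):

import numpy as np


class Cluster:
    def __init__(self, name):
        self.name = name
        self.elements = []

    def append_el(self, element):
        self.elements.append(element)

    def get_centroid(self):
        # mean of the feature vectors currently assigned to this cluster
        return np.mean([el.features for el in self.elements], axis=0)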
Example #10
File: Main.py  Project: pwnsbey/cs450
from sklearn import datasets
from DataWizard import Wizard
from Iris import Iris as Iris

dataset = datasets.load_iris()

print("Dataset values:")

# Show the data (the attributes of each example)
print(dataset.data)

# Show the target values (in numeric format) of each example
print(dataset.target)

# Show the actual target names that correspond to each number
print(dataset.target_names)

print("++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++")

# organize and get the data back from the Wizard
wizard = Wizard(dataset)
workingDatasets = wizard.organize_data()

nodeLayersArray = [3]  # LAST NEURON LAYER SHOULD BE DETERMINED BY NUMBER OF POSSIBLE TARGETS
Iris = Iris()
Iris.train(workingDatasets[0], nodeLayersArray)
Iris.test(workingDatasets[1])
Iris.printNetwork(Iris.neuralNetwork)
Example #11
              metrics=['accuracy'])

model.fit(training_data.data, training_data.labels, epochs=500, batch_size=32)

# Evaluate model
# --------------------------------------

evaluate_model(model, training_data)

# Evaluate a few single iris samples
# --------------------------------------

# This is from test data. It should be an iris virginica (2)
print('\n\nTest iris 1:')
print('(This is from test data. It should be an iris virginica)')
test_iris = Iris(sepal_length=4.9,
                 sepal_width=2.5,
                 petal_length=4.5,
                 petal_width=1.7)
test_iris.classify(model=model)
print(test_iris)

# Made-up sample to classify
print('\n\nTest iris 2:')
test_iris_2 = Iris(sepal_length=5.4,
                   sepal_width=3.1,
                   petal_length=2.5,
                   petal_width=2.3)
test_iris_2.classify(model=model)
print(test_iris_2)
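
The example starts in the middle of a model.compile(...) call; only metrics=['accuracy'] is visible. Assuming the model is a small Keras network over the four iris features and three classes, the missing setup could look like this (layer sizes, optimizer, and loss are assumptions):

import tensorflow as tf

model = tf.keras.Sequential([
    tf.keras.layers.Dense(16, activation='relu', input_shape=(4,)),
    tf.keras.layers.Dense(3, activation='softmax'),
])
model.compile(optimizer='adam',
              loss='sparse_categorical_crossentropy',
              metrics=['accuracy'])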
Example #12
from Iris import Iris
import numpy as np

print ("Iris Class Predictor")
print ("Attributes\nsepal length\nsepal width\npetal length\npetal width\n\n\nClass: Iris Setosa\tIris Versicolour\t Iris Virginica")

user_input = []
print ("User Input \nExamples\n5.1,3.5,1.4,0.2\n4.9,3.0,1.4,0.2")

print ("Sepal length")
user_input.append(float(input()))
print ("Sepal Width")
user_input.append(float(input()))
print ("Petal length")
user_input.append(float(input()))
print ("Petal Width")
user_input.append(float(input()))

input_2d_format = [user_input]

iris = Iris()
predictions = iris.KNN(input_2d_format)
iris.displayPrediction(input_2d_format, predictions)
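
The Iris class here wraps a k-nearest-neighbours classifier. A minimal sketch of KNN and displayPrediction consistent with the calls above, built on scikit-learn (only the method names come from the example; the internals are assumptions):

from sklearn.datasets import load_iris
from sklearn.neighbors import KNeighborsClassifier


class Iris:
    def __init__(self, n_neighbors=5):
        data = load_iris()
        self.target_names = data.target_names
        self.knn = KNeighborsClassifier(n_neighbors=n_neighbors)
        self.knn.fit(data.data, data.target)

    def KNN(self, samples):
        # samples: list of [sepal length, sepal width, petal length, petal width]
        return self.knn.predict(samples)

    def displayPrediction(self, samples, predictions):
        for sample, label in zip(samples, predictions):
            print(sample, '->', self.target_names[label])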