# Example #1 — two scripts merged: model training (first main) and hotdog evaluation (second main)
def main():
    """Train a sandwich-vs-other-food CNN and pickle it to 'model.pkl'.

    Pipeline: load image paths, augment/label both classes via
    ``ImageTools.expandClass``, greyscale + normalize, one-hot encode the
    labels, split 80/20, train the ``network`` CNN, and dump the fitted
    model with pickle.

    Relies on module-level names imported elsewhere in this file:
    ``ImageTools``, ``network``, ``np``, ``to_categorical``,
    ``train_test_split``, ``pickle``.
    """
    img_size = 128     # square edge length (pixels) the images are resized to
    classSize = 5000   # augmented samples generated per class
    num_epochs = 15

    # Loading Data
    print("\nImporting data..")
    food_files = ImageTools.parseImagePaths('./img/food/')
    sandwich_files = ImageTools.parseImagePaths('./img/sandwich/')
    print("\t..done.\n")

    print("\nAssigning Labels, Generating more images via transformation..")
    print("\tParsing/Labeling foods (sandwiches exclusive)..")
    # label 0 = non-sandwich food, label 1 = sandwich
    food_x, food_y = ImageTools.expandClass(food_files, 0, classSize, img_size)
    print("\t\t..done.")
    print("\tParsing/Labeling sandwiches..")
    sandwich_x, sandwich_y = ImageTools.expandClass(sandwich_files, 1,
                                                    classSize, img_size)
    print("\t\t..done.\n")

    # Arranging: stack both classes into single feature/label arrays
    X = np.array(food_x + sandwich_x)
    y = np.array(food_y + sandwich_y)

    # Greyscaling and normalizing inputs to reduce features and improve comparability
    print("\nGreyscaling and Normalizing Images..")
    X = ImageTools.greyscaleImgs(X)
    X = ImageTools.normalizeImgs(X)
    print("\t..done.\n")
    y = to_categorical(y)  # one-hot encode for categorical_crossentropy

    # Train n' test: 80/20 split; random seed varies run to run on purpose
    print("\nSplitting data into training and testing..")
    X_train, X_test, y_train, y_test = train_test_split(
        X, y, test_size=0.2, random_state=np.random.randint(0, 100))
    print("\t..done.\n")

    print("\tCalling model..")
    model = network(img_size)  # Calling of CNN
    model.compile('adam', 'categorical_crossentropy', ['accuracy'])
    print("\t..done.\n")

    print("\nTraining Model..")
    # FIX: Keras 2 renamed `nb_epoch` to `epochs`; the old keyword raises a
    # TypeError on any modern Keras/TensorFlow install.
    model.fit(X_train, y_train, epochs=num_epochs, validation_split=0.1)
    print("\t..done.\n")

    # Saving model — `with` guarantees the file is closed even if dump fails.
    # NOTE(review): pickling a Keras model is fragile across versions;
    # model.save() is the supported path — pickle kept so the evaluator
    # below, which calls pickle.load, still works.
    print("\nPickling and saving model as 'model.pkl'...")
    with open('model.pkl', 'wb') as modelsave:
        pickle.dump(model, modelsave)
    print("\t..done.\n")
def main():
    """Load the pickled sandwich classifier and score hotdog images with it.

    Preprocesses './img/hotdog/' exactly like the training pipeline
    (augment, greyscale, normalize), runs the model, and prints the mean
    and standard deviation of the predictions.

    NOTE(review): this redefines main() from the training section above —
    at import time only this definition survives. The two sections look
    like separate scripts (trainModel.py and an evaluator) pasted into one
    file; they should live in separate modules or get distinct names.

    Relies on module-level names imported elsewhere in this file:
    ``ImageTools``, ``np``, ``pickle``.
    """
    img_size = 128     # must match the size the model was trained on
    classSize = 2000   # augmented samples to generate for evaluation

    # Loading model from .pkl — `with` fixes a leak: the file handle was
    # previously opened and never closed.
    # SECURITY: pickle.load executes arbitrary code from the file; only
    # load model.pkl if it was produced by the trusted trainer above.
    with open('model.pkl', 'rb') as model_file:
        model = pickle.load(model_file)

    # Loading data
    print("\nImporting data..")
    hotdog_files = ImageTools.parseImagePaths('./img/hotdog/')
    print("\t..done.\n")

    # Preprocess the hotdog files, just like what was done in trainModel.py
    # note that the class label isn't necessary, as that is what we're trying to determine.
    print("\nGreyscaling and Normalizing Images..")
    x, _ = ImageTools.expandClass(hotdog_files, 0, classSize, img_size)
    x = np.array(x)
    x = ImageTools.greyscaleImgs(x)
    x = ImageTools.normalizeImgs(x)
    print("\t..done.\n")

    # Generating results from the model: summarize predicted scores
    results = model.predict(x)
    mean = np.mean(results)
    stddev = np.std(results)

    print("--")
    # FIX: removed doubled trailing quote in the original banner string
    print("'Is a hotdog a sandwich?'")
    print("RESULTS:")
    print("\tMean: {}".format(mean))
    print("\tStandard Deviation: {}".format(stddev))
    print("--")