def processing_image(image):
    # resize to 64x64 while preserving the aspect ratio, then convert
    # the image to a Keras-compatible array
    aap = AspectAwarePreprocessor(64, 64)
    iap = ImageToArrayPreprocessor()

    # apply the preprocessors directly to the single in-memory image
    # (SimpleDatasetLoader.load expects a list of image *paths*, so it
    # is not the right tool for a lone frame)
    image = aap.preprocess(image)
    image = iap.preprocess(image)

    # scale raw pixel intensities to the range [0, 1]
    X_test = image.astype("float") / 255.0

    return X_test
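# Usage sketch (illustrative, not part of the original script): run one
# frame through processing_image and a trained model. cv2, the image
# path, and `model` are assumptions here.
#
#   image = cv2.imread("example.jpg")
#   X = processing_image(image)
#   pred = model.predict(np.expand_dims(X, axis=0)).argmax(axis=1)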
from sklearn.preprocessing import LabelEncoder, LabelBinarizer
from sklearn.linear_model import SGDClassifier
from sklearn.model_selection import train_test_split
# ImageToArrayPreprocessor and AspectAwarePreprocessor are assumed to
# live alongside SimplePreprocessor; adjust to your package layout
from preprocessing import (SimplePreprocessor, ImageToArrayPreprocessor,
                           AspectAwarePreprocessor)
from datasets import SimpleDatasetLoader
from keras.models import load_model
from imutils import paths
import numpy as np
import argparse
import cv2

ap = argparse.ArgumentParser()
ap.add_argument("-d", "--dataset", required=True)
args = vars(ap.parse_args())

print("[INFO] loading images ...")
imagePaths = list(paths.list_images(args["dataset"]))

sp = SimplePreprocessor(32, 32)
sdl = SimpleDatasetLoader(preprocessors=[sp])
(data, labels) = sdl.load(imagePaths, verbose=500)
data = data.reshape((data.shape[0], 3072))
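# (each 32x32 RGB image is flattened into a 32*32*3 = 3072-dim vector)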

le = LabelEncoder()
labels = le.fit_transform(labels)
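# Illustration (not from the original script): LabelEncoder maps string
# class names to integers in sorted order, e.g.
#   ["cat", "dog", "panda"] -> [0, 1, 2]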

(trainX, testX, trainY, testY) = train_test_split(data,
                                                  labels,
                                                  test_size=0.25,
                                                  random_state=5)

#! Regularizers
# loop over our set of regularizers, training and evaluating an SGD
# classifier (softmax loss) with each penalty
for r in (None, "l1", "l2"):
    print("[INFO] training model with '{}' penalty".format(r))
    model = SGDClassifier(loss="log", penalty=r, max_iter=10,
                          learning_rate="constant", eta0=0.01,
                          random_state=42)
    model.fit(trainX, trainY)

    # evaluate the classifier on the held-out test split
    acc = model.score(testX, testY)
    print("[INFO] '{}' penalty accuracy: {:.2f}%".format(r, acc * 100))
    "--dataset",
    required=False,
    default=
    "/home/vamsimocherla/Research/DeepLearning/DrAdrian/0-StarterBundle/SB_Code/datasets/animals",
    help="path to input dataset")
args = vars(ap.parse_args())
# grab the list of images that we’ll be describing
print("[INFO] loading images...")
image_paths = list(paths.list_images(args["dataset"]))

# initialize the image processors
sp = SimplePreprocessor(32, 32)
ip = ImageToArrayPreprocessor()

# load the dataset from disk, then scale the raw pixel
# intensities to the range [0, 1]
sdl = SimpleDatasetLoader(preprocessors=[sp, ip])
data, labels = sdl.load(image_paths, verbose=500)
data = data.astype("float") / 255.0

# split data into training and testing
trainX, testX, trainY, testY = train_test_split(data,
                                                labels,
                                                test_size=0.25,
                                                random_state=42)
# one-hot encoding - convert labels to vectors; fit the binarizer on
# the training labels only, then reuse it on the test labels so both
# share the same column order
lb = LabelBinarizer()
trainY = lb.fit_transform(trainY)
testY = lb.transform(testY)
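# Illustration (not from the original script): with the three animal
# classes, each label becomes a one-hot row, e.g.
#   "cat"   -> [1, 0, 0]
#   "dog"   -> [0, 1, 0]
#   "panda" -> [0, 0, 1]
# (column order follows the binarizer's sorted classes_ attribute)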

# initialize the class labels for the animals dataset (the network
# itself is loaded pre-trained below rather than compiled here)
classLabels = ["cat", "dog", "panda"]

# grab the list of images in the dataset then randomly sample
# indexes into the image paths list
print("[INFO] sampling images...")
imagePaths = np.array(list(paths.list_images(args["dataset"])))
idxs = np.random.randint(0, len(imagePaths), size=(10,))
imagePaths = imagePaths[idxs]

# initialize the image preprocessors
sp = SimplePreprocessor(32, 32)
iap = ImageToArrayPreprocessor()

# load the dataset from disk then scale the raw pixel intensities
# to the range [0, 1]
sdl = SimpleDatasetLoader(preprocessors=[sp, iap])
(data, labels) = sdl.load(imagePaths)
data = data.astype("float") / 255.0

# load the pre-trained network
print("[INFO] loading pre-trained network...")
model = load_model(args["model"])

# make predictions on the images
print("[INFO] predicting...")
preds = model.predict(data, batch_size=32).argmax(axis=1)
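# model.predict returns an (N, 3) array of class probabilities for this
# three-class model; argmax over axis=1 picks the most likely class
# index for each sampled image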

# loop over the sample images
for (i, imagePath) in enumerate(imagePaths):
	# load the example image, draw the prediction, and display it
	# to our screen
	image = cv2.imread(imagePath)
	cv2.putText(image, "Label: {}".format(classLabels[preds[i]]),
		(10, 30), cv2.FONT_HERSHEY_SIMPLEX, 0.7, (0, 255, 0), 2)
	cv2.imshow("Image", image)
	cv2.waitKey(0)
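
# The driving-log training entry point below relies on module-level
# configuration and a parse_args() helper that are not shown in this
# file. A minimal sketch of what it assumes (names and values here are
# illustrative guesses, not the original definitions):
#
#   import sys
#   import matplotlib.pyplot as plt
#   from sklearn.metrics import classification_report
#   from sklearn.utils import shuffle
#   from keras.preprocessing.image import ImageDataGenerator
#   from keras.optimizers import SGD
#   from nn.conv import MiniVGGNet  # assumed package path
#
#   img_rows, img_cols = 32, 32     # frame size fed to the preprocessor
#   learning_rate = 0.01
#   nb_epoch = 40
#   batch_size = 32
#
#   def parse_args(argv):
#       ap = argparse.ArgumentParser()
#       ap.add_argument("--driving-log", dest="driving_log", required=True)
#       ap.add_argument("--dataset", required=True)
#       ap.add_argument("--model", required=True)
#       return ap.parse_args(argv)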
def main(argv=None):
    if argv is None:
        argv = sys.argv[1:]
    options = parse_args(argv)

    print("[INFO] loading images...")

    loader = SimpleDatasetLoader(preprocessors=[
        SimplePreprocessor(width=img_cols, height=img_rows),
        ImageToArrayPreprocessor(),
    ])
    data, labels = loader.load(
        driving_log_path=options.driving_log,
        data_path=options.dataset,
        verbose=True,
    )
    data = data.astype('float32')

    # # horizontal reflection for augmentation
    # data = np.append(data, data[:, :, ::-1], axis=0)
    # labels = np.append(labels, -labels, axis=0)

    # split train and validation
    data, labels = shuffle(data, labels)
    x_train, x_test, y_train, y_test = train_test_split(
        data,
        labels,
        random_state=13,
        test_size=0.1,
    )
    # x_train = x_train.reshape(x_train.shape[0], img_rows, img_cols, 1)
    # x_test = x_test.reshape(x_test.shape[0], img_rows, img_cols, 1)

    lb = LabelBinarizer()
    y_train = lb.fit_transform(y_train)
    y_test = lb.transform(y_test)

    label_names = ['straight', 'left', 'right']

    aug = ImageDataGenerator(
        rotation_range=1,
        width_shift_range=0.1,
        height_shift_range=0.1,
        zoom_range=0.2,
        horizontal_flip=False,
        fill_mode="nearest",
    )
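    # note: rotation_range is in degrees, so 1 gives only a slight tilt;
    # horizontal_flip stays off because the left/right labels are
    # direction-sensitive (a flipped frame would need its label swapped,
    # as in the commented-out reflection augmentation above)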

    print('[INFO] compiling model...')
    # model = NvidiaNet.build(width=img_cols, height=img_rows, depth=1)
    # model = TinyNet.build(width=img_cols, height=img_rows, depth=1)
    # model = ShallowNet.build(width=img_cols, height=img_rows, depth=1, classes=len(label_names))
    model = MiniVGGNet.build(width=img_cols,
                             height=img_rows,
                             depth=1,
                             classes=len(label_names))

    opt = SGD(lr=learning_rate,
              momentum=0.9,
              decay=learning_rate / nb_epoch,
              nesterov=True)
    # opt = SGD(lr=learning_rate)
    # opt = Adam(lr=learning_rate)
    # model.compile(
    #     loss='mean_squared_error',
    #     metrics=["accuracy"],
    #     optimizer=opt,
    # )
    model.compile(
        loss='categorical_crossentropy',
        metrics=['accuracy'],
        optimizer=opt,
    )

    # train the network with on-the-fly augmentation; under Keras 2,
    # fit_generator takes `epochs` (the Keras 1 `nb_epoch` keyword is
    # not accepted alongside `steps_per_epoch`)
    history = model.fit_generator(
        aug.flow(x_train, y_train, batch_size=batch_size),
        # to train without augmentation, use model.fit(x_train, y_train,
        # batch_size=batch_size, ...) instead
        epochs=nb_epoch,
        steps_per_epoch=(len(x_train) // batch_size),
        verbose=1,
        validation_data=(x_test, y_test),
    )
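    # on TF 2.x Keras, Model.fit accepts generators directly and
    # fit_generator is deprecated; the call above would become
    # model.fit(aug.flow(...), epochs=..., steps_per_epoch=..., ...)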

    predictions = model.predict(x_test, batch_size=batch_size)
    print(
        classification_report(
            y_test.argmax(axis=1),
            predictions.argmax(axis=1),
            target_names=label_names,
        ))

    plt.style.use("ggplot")
    fig, ax_acc = plt.subplots(1, 1)

    ax_acc.set_xlabel("Epoch #")

    ax_loss = ax_acc.twinx()
    ax_loss.grid(False)
    ax_loss.set_ylabel("Loss")

    ax_acc.grid(False)
    ax_acc.set_ylabel("Accuracy")
    ax_acc.set_ylim([0, 1])

    ax_loss.plot(np.arange(0, nb_epoch),
                 history.history["loss"],
                 label="train_loss")
    ax_loss.plot(np.arange(0, nb_epoch),
                 history.history["val_loss"],
                 label="val_loss")
    ax_acc.plot(np.arange(0, nb_epoch),
                history.history["acc"],
                label="train_acc")
    ax_acc.plot(np.arange(0, nb_epoch),
                history.history["val_acc"],
                label="val_acc")
    fig.suptitle("Training Loss and Accuracy")
    fig.legend()
    plt.show()

    model.save(options.model)

    return 0