Code Example #1
from sklearn.linear_model import SGDClassifier
from sklearn.preprocessing import LabelEncoder
from sklearn.model_selection import train_test_split
from preprocessing import SimplePreprocessor
from datasets import SimpleDatasetLoader
from imutils import paths
import argparse

# construct the argument parser and parse the arguments
ap = argparse.ArgumentParser()
ap.add_argument("-d", "--dataset", required=True, help="path to input dataset")
args = vars(ap.parse_args())

print("[INFO] loading images ...")
imagePaths = list(paths.list_images(args["dataset"]))

# initialize the image preprocessor, load the dataset from disk, and
# flatten each 32x32x3 image into a 3072-dim feature vector
sp = SimplePreprocessor(32, 32)
sdl = SimpleDatasetLoader(preprocessors=[sp])
(data, labels) = sdl.load(imagePaths, verbose=500)
data = data.reshape((data.shape[0], 3072))

# encode the string labels as integers
le = LabelEncoder()
labels = le.fit_transform(labels)

# partition the data into training and testing splits, using 75% of the
# data for training and the remaining 25% for testing
(trainX, testX, trainY, testY) = train_test_split(data,
                                                  labels,
                                                  test_size=0.25,
                                                  random_state=5)

# loop over our set of regularizers
for r in (None, "l1", "l2"):
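    # NOTE: the example is truncated at the loop header; the body below is
    # a minimal sketch that trains the imported SGDClassifier once per
    # penalty (the hyperparameter values are illustrative assumptions)
    print("[INFO] training model with `{}` penalty".format(r))
    model = SGDClassifier(loss="log", penalty=r, max_iter=10,
                          learning_rate="constant", eta0=0.01,
                          random_state=42)
    model.fit(trainX, trainY)

    # evaluate the classifier on the held-out test split
    acc = model.score(testX, testY)
    print("[INFO] `{}` penalty accuracy: {:.2f}%".format(r, acc * 100))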
Code Example #2
from sklearn.model_selection import train_test_split
from preprocessing import SimplePreprocessor
from datasets import SimpleDatasetLoader
from imutils import paths
import numpy as np
import argparse

img_width, img_height = 48, 48

# construct the argument parse and parse the arguments
ap = argparse.ArgumentParser()
ap.add_argument("-d", "--dataset", required=True, help="path to input dataset")
args = vars(ap.parse_args())

# grab the list of images that we'll be describing
print("[INFO] loading images...")
imagePaths = list(paths.list_images(args["dataset"]))

# initialize the image preprocessors
sp = SimplePreprocessor(img_width, img_height)

# load the dataset from disk then scale the raw pixel intensities
# to the range [0, 1]
sdl = SimpleDatasetLoader(preprocessors=[sp])
(data, labels) = sdl.load(imagePaths, verbose=500)
data = data.astype("float") / 255.0

# partition the data into training and testing splits using 75% of
# the data for training and the remaining 25% for testing
(trainX, testX, trainY, testY) = train_test_split(data,
                                                  labels,
                                                  test_size=0.25,
                                                  random_state=42)

# convert the labels from integers to vectors
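# NOTE: the example is truncated here; a common completion (an assumption)
# one-hot encodes the string labels with scikit-learn's LabelBinarizer
from sklearn.preprocessing import LabelBinarizer
lb = LabelBinarizer()
trainY = lb.fit_transform(trainY)
testY = lb.transform(testY)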
Code Example #3
    # NOTE: the excerpt begins mid-way through main(); its imports are
    # omitted, and the '-m/--model' flag below is reconstructed from the
    # options.model usage further down
    parser = argparse.ArgumentParser()
    parser.add_argument(
        '-m',
        '--model',
        required=True,
        help='path to model',
    )
    parser.add_argument(
        '-i',
        '--ip',
        required=True,
        help='ip of raspi',
    )
    options = parser.parse_args()

    img_rows = 20
    img_cols = 32
    label_names = ['straight', 'left', 'right']
    preprocessors = [
        ImageToArrayPreprocessor(),
        SimplePreprocessor(width=img_cols, height=img_rows),
    ]

    model = MiniVGGNet.build(width=img_cols,
                             height=img_rows,
                             depth=1,
                             classes=len(label_names))
    model.load_weights(options.model)

    # command_throttle_forward(45)
    while True:
        image = get_frame(options.ip)
        for processor in preprocessors:
            image = processor.preprocess(image)
        # add a batch dimension: shape (1, img_rows, img_cols, 1)
        data = np.array([image.reshape(img_rows, img_cols, 1)])
        prediction = model.predict(data, batch_size=1)
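        # NOTE: the excerpt ends here; a plausible continuation (an
        # assumption) maps the class probabilities to a steering label
        label = label_names[prediction.argmax(axis=1)[0]]
        print('[INFO] predicted: {}'.format(label))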
Code Example #4
import json

from keras.preprocessing.image import ImageDataGenerator

# NOTE: the imports below follow the module layout used in Code Example #1;
# project_root and the DATASET_MEAN / TRAIN_HDF5 / VAL_HDF5 paths are
# assumed to come from a project-level config module
from preprocessing import (SimplePreprocessor, PatchPreprocessor,
                           MeanPreprocessor, ImageToArrayPreprocessor)
from datasets import HDF5DatasetGenerator

OUTPUT_PATH = project_root + "output"
"""---------------------------------- Initialization ----------------------------------"""
# build the data augmentation generator
aug = ImageDataGenerator(rotation_range=20,
                         zoom_range=0.15,
                         width_shift_range=0.2,
                         height_shift_range=0.2,
                         shear_range=0.15,
                         horizontal_flip=True,
                         fill_mode="nearest")

# load the RGB channel means from the serialized JSON file
means = json.loads(open(DATASET_MEAN).read())

# initialize the image preprocessors
sp = SimplePreprocessor(227, 227)  # resize images to (227, 227)
pp = PatchPreprocessor(227, 227)  # randomly crop a (227, 227) patch
mp = MeanPreprocessor(means["R"], means["G"], means["B"])  # subtract the channel means
iap = ImageToArrayPreprocessor()

# initialize the training and validation dataset generators
trainGen = HDF5DatasetGenerator(TRAIN_HDF5,
                                64,
                                aug=aug,
                                preprocessors=[pp, mp, iap],
                                classes=2)
valGen = HDF5DatasetGenerator(VAL_HDF5,
                              64,
                              preprocessors=[sp, mp, iap],
                              classes=2)
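The excerpt stops once the generators are initialized. Below is a minimal sketch of the training step that usually follows, assuming a compiled Keras model is already in scope and that HDF5DatasetGenerator exposes generator(), numImages and close() as in the pyimagesearch implementation (the epoch count here is illustrative):

# NOTE: illustrative continuation; `model` is assumed to be a compiled
# Keras model and the epoch count is an example value
model.fit_generator(
    trainGen.generator(),
    steps_per_epoch=trainGen.numImages // 64,
    validation_data=valGen.generator(),
    validation_steps=valGen.numImages // 64,
    epochs=75,
    verbose=1)

# close the HDF5 dataset readers when training finishes
trainGen.close()
valGen.close()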
Code Example #5
# NOTE: the excerpt omits the file header; the imports and module-level
# settings below are reconstructed from usage. Module paths follow the
# layout of the earlier examples, the hyperparameter values are
# illustrative assumptions, and parse_args (returning .driving_log,
# .dataset and .model options) is assumed to be defined alongside.
import sys

import matplotlib.pyplot as plt
import numpy as np
from keras.optimizers import SGD
from keras.preprocessing.image import ImageDataGenerator
from sklearn.metrics import classification_report
from sklearn.model_selection import train_test_split
from sklearn.preprocessing import LabelBinarizer
from sklearn.utils import shuffle

from preprocessing import SimplePreprocessor, ImageToArrayPreprocessor
from datasets import SimpleDatasetLoader
from nn import MiniVGGNet  # assumed module path

img_rows, img_cols = 20, 32  # matches Code Example #3
batch_size = 32  # illustrative
nb_epoch = 50  # illustrative
learning_rate = 0.01  # illustrative


def main(argv=None):
    if argv is None:
        argv = sys.argv[1:]
    options = parse_args(argv)

    print("[INFO] loading images...")

    loader = SimpleDatasetLoader(preprocessors=[
        SimplePreprocessor(width=img_cols, height=img_rows),
        ImageToArrayPreprocessor(),
    ])
    data, labels = loader.load(
        driving_log_path=options.driving_log,
        data_path=options.dataset,
        verbose=True,
    )
    data = data.astype('float32')

    # # horizontal reflection for augmentation
    # data = np.append(data, data[:, :, ::-1], axis=0)
    # labels = np.append(labels, -labels, axis=0)

    # split train and validation
    data, labels = shuffle(data, labels)
    x_train, x_test, y_train, y_test = train_test_split(
        data,
        labels,
        random_state=13,
        test_size=0.1,
    )
    # x_train = x_train.reshape(x_train.shape[0], img_rows, img_cols, 1)
    # x_test = x_test.reshape(x_test.shape[0], img_rows, img_cols, 1)

    lb = LabelBinarizer()
    y_train = lb.fit_transform(y_train)
    y_test = lb.transform(y_test)

    label_names = ['straight', 'left', 'right']

    aug = ImageDataGenerator(
        rotation_range=1,
        width_shift_range=0.1,
        height_shift_range=0.1,
        zoom_range=0.2,
        horizontal_flip=False,
        fill_mode="nearest",
    )

    print('[INFO] compiling model...')
    # model = NvidiaNet.build(width=img_cols, height=img_rows, depth=1)
    # model = TinyNet.build(width=img_cols, height=img_rows, depth=1)
    # model = ShallowNet.build(width=img_cols, height=img_rows, depth=1, classes=len(label_names))
    model = MiniVGGNet.build(width=img_cols,
                             height=img_rows,
                             depth=1,
                             classes=len(label_names))

    opt = SGD(lr=learning_rate,
              momentum=0.9,
              decay=learning_rate / nb_epoch,
              nesterov=True)
    # opt = SGD(lr=learning_rate)
    # opt = Adam(lr=learning_rate)
    # model.compile(
    #     loss='mean_squared_error',
    #     metrics=["accuracy"],
    #     optimizer=opt,
    # )
    model.compile(
        loss='categorical_crossentropy',
        metrics=['accuracy'],
        optimizer=opt,
    )

    # train the network with on-the-fly data augmentation (a plain
    # model.fit(x_train, y_train, batch_size=batch_size, ...) call could be
    # used instead when no augmentation is wanted)
    history = model.fit_generator(
        aug.flow(x_train, y_train, batch_size=batch_size),
        epochs=nb_epoch,
        steps_per_epoch=(len(x_train) // batch_size),
        verbose=1,
        validation_data=(x_test, y_test),
    )

    predictions = model.predict(x_test, batch_size=batch_size)
    print(
        classification_report(
            y_test.argmax(axis=1),
            predictions.argmax(axis=1),
            target_names=label_names,
        ))

    plt.style.use("ggplot")
    fig, ax_acc = plt.subplots(1, 1)

    ax_acc.set_xlabel("Epoch #")

    ax_loss = ax_acc.twinx()
    ax_loss.grid(None)
    ax_loss.set_ylabel("Loss")

    ax_acc.grid(None)
    ax_acc.set_ylabel("Accuracy")
    ax_acc.set_ylim([0, 1])

    ax_loss.plot(np.arange(0, nb_epoch),
                 history.history["loss"],
                 label="train_loss")
    ax_loss.plot(np.arange(0, nb_epoch),
                 history.history["val_loss"],
                 label="val_loss")
    ax_acc.plot(np.arange(0, nb_epoch),
                history.history["acc"],
                label="train_acc")
    ax_acc.plot(np.arange(0, nb_epoch),
                history.history["val_acc"],
                label="val_acc")
    fig.suptitle("Training Loss and Accuracy")
    fig.legend()
    plt.show()

    model.save(options.model)

    return 0
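The excerpt ends at the close of main(); a conventional entry-point guard (an assumption, not shown in the original) would invoke it:

# NOTE: assumed entry point, not part of the original excerpt
if __name__ == '__main__':
    sys.exit(main())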