# Training hyperparameters for the CNN section of this script.
TARGET_WIDTH = 64   # FIX: referenced by AspectAwarePreprocessor(TARGET_WIDTH, ...) below but
                    # was never defined — assumed square 64x64 input; TODO confirm intended value
TARGET_HEIGHT = 64  # target image height in pixels after aspect-aware resize
BATCH_SIZE = 32     # mini-batch size for training
EPOCHS = 10         # number of full passes over the training set
LR_INIT = 0.01      # initial learning rate for the optimizer
DECAY = LR_INIT / EPOCHS  # linear LR decay so the rate reaches ~0 by the final epoch
MOMENTUM = 0.9      # SGD momentum term


# Load images: resize preserving aspect ratio, then convert to Keras-ordered arrays.
# NOTE(review): TARGET_WIDTH must be defined earlier in the file (only TARGET_HEIGHT is
# visible in this chunk) — verify before running.
aap = AspectAwarePreprocessor(TARGET_WIDTH, TARGET_HEIGHT)
iap = ImageToArrayPreprocessor()

print("[INFO] loading images...")
# NOTE(review): dataset_path appears to be defined later in this file ('images');
# confirm it is in scope at this point.
imagePaths = list(paths.list_images(dataset_path))

# Load the dataset, applying both preprocessors to every image.
# Positional args: 500 = verbose progress interval, False = grayscale flag
# (cf. the keyword form used in the later sdl.load call in this file).
sdl = SimpleDatasetLoader(preprocessors=[aap, iap])
(data, labels) = sdl.load(imagePaths, 500, False)
# Scale raw pixel intensities from [0, 255] to [0, 1].
data = data.astype("float") / 255.0

# convert the labels from integers to vectors
# (2 = number of classes is hard-coded here — binary classification)
le = LabelEncoder().fit(labels)
labels = to_categorical(le.transform(labels), 2)

# partition the data into training and testing splits using 80% of
# the data for training and the remaining 20% for testing
# (stratified so both splits keep the class ratio; fixed seed for reproducibility)
(trainX, testX, trainY, testY) = train_test_split(data,
	        labels, test_size=0.20, stratify=labels, random_state=42)

# construct the image generator for data augmentation
# NOTE(review): this call is TRUNCATED — the argument list ends with a trailing
# comma and the closing parenthesis is missing (the next line begins an unrelated
# import, so this file looks like two chunks spliced together). This is a syntax
# error as-is; the remaining augmentation arguments must be restored from the
# original source.
aug = ImageDataGenerator(rotation_range=30, width_shift_range=0.1,
                         height_shift_range=0.1, shear_range=0.2, 
from sklearn.metrics import classification_report

#################################################
# Part 1: data preprocessing

# Global variables
dataset_path = 'images'  # root directory scanned for input images

# Global constants
N_NEIGHBOURS = 5          # k for the KNN classifier (not used in this chunk — presumably consumed later; verify)
TARGET_IMAGE_WIDTH = 32   # resize target width in pixels
TARGET_IMAGE_HEIGHT = 32  # resize target height in pixels

# Set up the resize preprocessor and the dataset loader that applies it.
sp = SimplePreprocessor(TARGET_IMAGE_WIDTH, TARGET_IMAGE_HEIGHT)
sdl = SimpleDatasetLoader(preprocessors=[sp])

# Gather every image path under the dataset directory and load the images
# as grayscale, printing progress every 500 files.
print("[INFO] loading images...")
image_paths = list(paths.list_images(dataset_path))  # path included
(X, y) = sdl.load(image_paths, verbose=500, grayscale=True)

# Flatten each image so the matrix goes from (n, W, H) to (n, W*H),
# e.g. (13164, 32, 32) -> (13164, 1024).
pixels_per_image = TARGET_IMAGE_WIDTH * TARGET_IMAGE_HEIGHT
X = X.reshape((X.shape[0], pixels_per_image))

# Report how much memory the flattened feature matrix occupies.
megabytes = X.nbytes / (1024 * 1024.0)
print("[INFO] features matrix: {:.1f}MB".format(megabytes))

# Encoder that will map string labels to integer class indices.
le = LabelEncoder()