def train(img_local_path, label_path, model_object_key):
    # Load a pre-trained SqueezeNet and fine-tune it on a single labelled image.
    model = SqueezeNet(weights='imagenet')
    img = image.load_img(img_local_path, target_size=(227, 227))
    x = image.img_to_array(img)
    x = np.expand_dims(x, axis=0)
    x = preprocess_input(x)
    # The label file is expected to contain a single integer class index,
    # as required by sparse_categorical_crossentropy.
    with open(label_path) as label_file:
        y = np.array([int(label_file.read())])
    model.compile(optimizer='adam', loss='sparse_categorical_crossentropy',
                  metrics=['accuracy'])
    history = model.fit(x, y)
    model.summary()
    # tmp_path is a module-level directory prefix for the saved weights.
    model.save_weights(tmp_path + model_object_key)
    return history.history
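# Hedged usage sketch for train() above. The file names and the tmp_path value
# are illustrative assumptions, not part of the original snippet.
tmp_path = '/tmp/'  # assumed directory for saved weights
history = train('cat.jpg', 'cat_label.txt', 'squeezenet_weights.h5')
print(history)  # per-epoch loss/accuracy recorded by Keras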
def main():
    # Load the licence-plate character dataset (36 character classes).
    pl_train, pl_labels = get_dataset('./Pan_Licence/')
    pl_labels = to_categorical(pl_labels, num_classes=36)
    x_train, x_val, y_train, y_val = train_test_split(pl_train, pl_labels,
                                                      test_size=0.2, random_state=2064)
    tb = TensorBoard(log_dir='./logs/Squeezenet', write_graph=True)
    # The SqueezeNet build is expected to output 36 classes to match the one-hot labels.
    model = SqueezeNet()
    model.compile(optimizer='adam', loss='categorical_crossentropy', metrics=['accuracy'])
    model.summary()
    history = model.fit(x_train, y_train, batch_size=32, epochs=5,
                        validation_split=0.1, shuffle=True, callbacks=[tb])

    # Save model architecture and weights
    json_model = model.to_json()
    with open('model_squeezenet.json', 'w') as f:
        f.write(json_model)
    model.save_weights('model_squeezenet.h5')
    print('Model Saved')

    print('Evaluating Model')
    score = model.evaluate(x=x_val, y=y_val, batch_size=1)
    print('Score', score[1] * 100.00)
    print('Loss', score[0])
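# Minimal entry point, assuming the script is run directly; main() is the
# function defined above.
if __name__ == '__main__':
    main()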
class DNNModel:
    def __init__(self, image_path):
        self.IMAGE_SIZE = 64
        self.data = []
        self.labels = []
        self.model = self.build_model()
        if image_path is not None:
            self.image_path = image_path
        else:
            self.image_path = "/home/madi/deeplearning/raspberry-pi/datasets"

    def gen_training_image_set(self):
        imagePaths = os.listdir(self.image_path)
        # loop over the input images
        for imagePath in imagePaths:
            # load the image, pre-process it, and store it in the data list
            imagePath = self.image_path + "/" + imagePath
            print(imagePath)
            image = cv2.imread(imagePath)
            image = cv2.resize(image, (self.IMAGE_SIZE, self.IMAGE_SIZE))
            image = img_to_array(image)
            self.data.append(image)
            # extract the class label and update the labels list
            # (assumes the class name appears in the file name)
            filename = imagePath.split(os.path.sep)[-1]
            if "left" in filename:
                label = 1
            elif "right" in filename:
                label = 2
            else:
                label = 0
            self.labels.append(label)
        # scale the raw pixel intensities to the range [0, 1]
        self.data = np.array(self.data, dtype="float") / 255.0
        self.labels = np.array(self.labels)

    def add_training_sample(self, data, label):
        image = cv2.resize(data, (self.IMAGE_SIZE, self.IMAGE_SIZE))
        image = img_to_array(image)
        self.data.append(image)
        self.labels.append(label)

    def scale_and_norm_training_samples(self):
        # scale the raw pixel intensities to the range [0, 1]
        self.data = np.array(self.data, dtype="float") / 255.0
        self.labels = np.array(self.labels)

    def build_model(self):
        self.model = SqueezeNet(include_top=True, weights=None, classes=3,
                                input_shape=(self.IMAGE_SIZE, self.IMAGE_SIZE, 3))
        self.model.summary()
        opt = Adam()
        # three-way softmax output with one-hot labels, so use categorical crossentropy
        self.model.compile(loss="categorical_crossentropy", optimizer=opt,
                           metrics=["accuracy"])
        return self.model

    def train(self):
        # split train and test set
        (trainX, testX, trainY, testY) = train_test_split(self.data, self.labels,
                                                          test_size=0.25,
                                                          random_state=42)
        # convert the labels from integers to one-hot vectors
        trainY = to_categorical(trainY, num_classes=3)
        testY = to_categorical(testY, num_classes=3)
        print(trainX.shape)
        print(trainY.shape)
        self.model.fit(trainX, trainY, batch_size=1, epochs=50, verbose=1,
                       validation_data=(testX, testY))
        self.test()

    def predict(self, img_frame):
        img_frame = cv2.resize(img_frame, (self.IMAGE_SIZE, self.IMAGE_SIZE))
        img_frame = img_to_array(img_frame)
        data = np.array([img_frame])
        # scale the raw pixel intensities to the range [0, 1]
        data = np.array(data, dtype="float") / 255.0
        ret = self.model.predict(data)
        if len(ret) > 0:
            return ret[0]

    def save_model(self):
        self.model.save("greenball_squeezenet_local.h5")

    def load_model(self, path):
        self.model = load_model(path)

    def test(self):
        cnt = 0
        for i in range(len(self.data)):
            # predict expects a batch, so add a leading batch dimension
            ret = self.model.predict(np.expand_dims(self.data[i], axis=0))
            pred = np.argmax(ret)
            if pred == self.labels[i]:
                cnt += 1
        print("total correct number is %d" % cnt)
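# Hedged usage sketch for DNNModel above: build the model, load and scale the
# images found under the dataset directory, train, and save the weights locally.
# The dataset path is the default from the class, shown here for illustration.
dnn = DNNModel("/home/madi/deeplearning/raspberry-pi/datasets")
dnn.gen_training_image_set()
dnn.train()
dnn.save_model()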
Y_train = to_categorical(y_train, nb_classes)
Y_test = to_categorical(y_test, nb_classes)

print('Loading model..')
model = SqueezeNet(nb_classes, input_shape=input_shape)
model.compile(loss="categorical_crossentropy", optimizer='adam', metrics=['accuracy'])

# resume from previously saved weights if they exist
if os.path.isfile(weights_file):
    print('Loading weights: %s' % weights_file)
    model.load_weights(weights_file, by_name=True)

print('Fitting model')
# Keras 2 uses epochs= rather than the older nb_epoch= keyword
model.fit(X_train, Y_train, batch_size=batch_size, epochs=nb_epoch, verbose=1,
          validation_split=0.2, initial_epoch=0)
print("Finished fitting model")

print('Saving weights')
model.save_weights(weights_file, overwrite=True)

print('Evaluating model')
score = model.evaluate(X_test, Y_test, verbose=1)
print('result: %s' % score)
# shuffle the (image, class) pairs together before unzipping them
random.shuffle(imgpaths_classes)
images, classes = zip(*imgpaths_classes)
classes = to_categorical(classes, num_classes=nb_classes)
images = np.asarray(images)

print('Loading model..')
model = SqueezeNet(nb_classes, input_shape=(227, 227, 3))
model.compile(loss="categorical_crossentropy", optimizer='adam',
              metrics=['accuracy', 'categorical_crossentropy'])

# resume from previously saved weights if they exist
if os.path.isfile(weights_file):
    print('Loading weights: %s' % weights_file)
    model.load_weights(weights_file, by_name=True)

print('Fitting model')
model.fit(images, classes, batch_size=batch_size, epochs=nb_epoch, verbose=1,
          validation_split=0.2, initial_epoch=0)
print("Finished fitting model")

print('Saving weights')
model.save_weights(weights_file, overwrite=True)

# note: evaluation here reuses the full training set; there is no held-out test split
print('Evaluating model')
score = model.evaluate(images, classes, verbose=1)
print('result: %s' % score)