def test_different_networks(amount):
    # rebuild the network with the weights trained on the given amount of noise
    newmodel = LeNet.build(width=28, height=28, depth=1, classes=num_classes,
                           weightsPath='output/lenet_weights_' + str(amount) + 'noise.hdf5')
    newmodel.compile(loss="categorical_crossentropy", optimizer=opt, metrics=["accuracy"])

    base_dir = os.path.join(os.path.dirname(__file__), '../datasets/')
    x_test_shapes = np.load(base_dir + 'random_shapes/20shapes/op0.4.npz')['arr_0'][5000:10000]
    x_test_bars = np.load(base_dir + 'bar_noise/op0.4.npz')['arr_0'][5000:10000]
    x_test_pixels = np.load(base_dir + 'random_pixels/op0.4.npz')['arr_0'][5000:10000]
    x_test_numbers = np.load(base_dir + 'number_noise/op0.4.npz')['arr_0'][5000:10000]

    # extend the clean test set with 5000 samples of each noise type; the noisy
    # images reuse the labels of the clean images they were generated from
    x_test_extended = np.concatenate(
        (x_test, x_test_shapes, x_test_bars, x_test_pixels, x_test_numbers), axis=0)
    y_test_extended = np.concatenate(
        (y_test, y_test[5000:10000], y_test[5000:10000],
         y_test[5000:10000], y_test[5000:10000]))
    print(len(x_test_extended))
    print(len(y_test_extended))

    (loss, accuracy) = newmodel.evaluate(x_test_extended, y_test_extended,
                                         batch_size=batch_size, verbose=1)
    print("Percentage of noise added to the dataset: " +
          str(amount / (amount + 10000) * 100) + ", accuracy: " + str(accuracy))
def train(aug, trainX, trainY, testX, testY):
    # initialize the model
    print("[INFO] compiling model...")
    model = LeNet.build(width=norm_size, height=norm_size, depth=1, classes=CLASS_NUM)
    # model = MyModel.build(width=norm_size, height=norm_size, depth=1, classes=CLASS_NUM)
    # model = InceptionV3(weights='imagenet', include_top=True)
    # model = InceptionV3.build(width=norm_size, height=norm_size, depth=1, classes=CLASS_NUM)
    opt = Adam(lr=INIT_LR, decay=INIT_LR / EPOCHS)
    model.compile(loss="categorical_crossentropy", optimizer=opt, metrics=["accuracy"])

    # train the network
    print("[INFO] training network...")
    H = model.fit_generator(aug.flow(trainX, trainY, batch_size=BS),
                            validation_data=(testX, testY),
                            steps_per_epoch=len(trainX) // BS,
                            epochs=EPOCHS, verbose=1)

    # save the model to disk
    print("[INFO] serializing network...")
    model.save('./model')
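# None of the snippets in this section define LeNet.build itself. As a point
# of reference, here is a minimal sketch of a LeNet-style builder consistent
# with the width/height/depth/classes/weightsPath signature used in most of
# them; the exact layer sizes in the original repositories may differ.
from keras.models import Sequential
from keras.layers import Conv2D, MaxPooling2D, Activation, Flatten, Dense


class LeNet:
    @staticmethod
    def build(width, height, depth, classes, weightsPath=None):
        model = Sequential()
        input_shape = (height, width, depth)  # channels-last ordering

        # first CONV => RELU => POOL block
        model.add(Conv2D(20, (5, 5), padding="same", input_shape=input_shape))
        model.add(Activation("relu"))
        model.add(MaxPooling2D(pool_size=(2, 2), strides=(2, 2)))

        # second CONV => RELU => POOL block
        model.add(Conv2D(50, (5, 5), padding="same"))
        model.add(Activation("relu"))
        model.add(MaxPooling2D(pool_size=(2, 2), strides=(2, 2)))

        # fully connected layers followed by a softmax classifier
        model.add(Flatten())
        model.add(Dense(500))
        model.add(Activation("relu"))
        model.add(Dense(classes))
        model.add(Activation("softmax"))

        # optionally resume from previously serialized weights
        if weightsPath is not None:
            model.load_weights(weightsPath)

        return model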
def train(aug, train_X, train_Y, test_X, test_Y):
    print("[INFO] compiling model...")
    model = LeNet.build(width=WIDTH, height=HEIGHT, depth=3, classes=CLASS_NUM)
    opt = Adam(lr=INIT_LR, decay=INIT_LR / EPOCHS)
    model.compile(loss="categorical_crossentropy", optimizer=opt, metrics=["accuracy"])

    print("[INFO] training network...")
    # steps_per_epoch is the number of batches drawn from the generator per
    # epoch; validation_data is the test data, used only for evaluation and
    # never for training
    _history = model.fit_generator(aug.flow(train_X, train_Y, batch_size=BATCH_SIZE),
                                   validation_data=(test_X, test_Y),
                                   steps_per_epoch=len(train_X) // BATCH_SIZE,
                                   epochs=EPOCHS, verbose=1)
    model.save("./save/model.h5")  # save the model to disk

    # plot the training loss and accuracy
    plt.style.use("ggplot")
    plt.figure()
    N = EPOCHS
    plt.plot(np.arange(0, N), _history.history["loss"], label="train_loss")
    plt.plot(np.arange(0, N), _history.history["val_loss"], label="val_loss")
    plt.plot(np.arange(0, N), _history.history["acc"], label="train_acc")
    plt.plot(np.arange(0, N), _history.history["val_acc"], label="val_acc")
    plt.title("loss and accuracy")
    plt.xlabel("epoch")
    plt.ylabel("loss/acc")
    plt.legend(loc="best")
    plt.savefig("./result/result.png")
    plt.show()
def LeNet_train(aug, trainX, trainY, testX, testY):
    # initialize the model
    print("[INFO] compiling model...")
    model = LeNet.build(width=norm_size, height=norm_size, depth=3, classes=CLASS_NUM)

    checkpoint_path = './save_weights/leNet.ckpt'
    # if os.path.exists(checkpoint_path):
    #     model.load_weights(checkpoint_path)
    #     # report when previously saved weights were loaded successfully
    #     print("checkpoint_loaded")

    opt = Adam(lr=INIT_LR, decay=INIT_LR / EPOCHS)
    model.compile(
        # this is a multi-class task, so categorical cross-entropy is used;
        # for a two-class task the loss should be binary cross-entropy
        loss="categorical_crossentropy",
        optimizer=opt,
        metrics=["accuracy"])

    # checkpoint callback so interrupted training can resume from the best weights
    checkpoint = ModelCheckpoint(checkpoint_path,
                                 monitor='accuracy',
                                 save_weights_only=True,
                                 verbose=1,
                                 save_best_only=True,
                                 period=1)

    # train the network
    print("[INFO] training network...")
    H = model.fit_generator(aug.flow(trainX, trainY, batch_size=BS),
                            validation_data=(testX, testY),
                            validation_steps=12,
                            steps_per_epoch=len(trainX) // BS,
                            epochs=EPOCHS, verbose=1,
                            callbacks=[checkpoint])

    # save the model to disk
    print("[INFO] serializing network...")
    # model.save("test_sign.model")

    # plot the training loss and accuracy
    plt.style.use("ggplot")
    plt.figure()
    N = EPOCHS
    plt.plot(np.arange(0, N), H.history["loss"], label="train_loss")
    plt.plot(np.arange(0, N), H.history["val_loss"], label="val_loss")
    plt.plot(np.arange(0, N), H.history["accuracy"], label="train_acc")
    plt.plot(np.arange(0, N), H.history["val_accuracy"], label="val_acc")
    plt.title("Training Loss and Accuracy on traffic-sign classifier")
    plt.xlabel("Epoch #")
    plt.ylabel("Loss/Accuracy")
    plt.legend(loc="lower left")
    plt.savefig("plot.png")
def read_date(arg, weightsPath, numChannels=1, imgRows=28, imgCols=28,
              numClasses=10, filename='hwdates.jpg', folder=tmp_folder):
    outDt = ''
    model = LeNet.build(numChannels=1, imgRows=28, imgCols=28, numClasses=10,
                        weightsPath=weightsPath)

    # accept either an image array or a path to an image file
    if type(arg) is np.ndarray:
        img = arg.copy()
        formDt = arg.copy()
    else:
        img = cv2.imread(arg)
        formDt = cv2.imread(arg, 0)

    # threshold the inverted grayscale image and find the outer contours
    # (the three-value return of findContours is OpenCV 3.x behaviour)
    ret, thresh = cv2.threshold(~formDt, 127, 255, 0)
    image, contours, hierarchy = cv2.findContours(thresh, cv2.RETR_EXTERNAL,
                                                  cv2.CHAIN_APPROX_SIMPLE)
    (sorted_ctrs, boundingBoxes) = sort_contours(contours, method="left-to-right")

    for i, c in enumerate(sorted_ctrs):
        # isolate the current contour on a blank canvas
        tmp_img = np.zeros(formDt.shape, dtype=np.uint8)
        res = cv2.drawContours(tmp_img, [c], -1, 255, cv2.FILLED)
        tmp_img = np.bitwise_and(tmp_img, ~formDt)
        ret, inverted = cv2.threshold(tmp_img, 127, 255, cv2.THRESH_BINARY_INV)

        cnt = sorted_ctrs[i]
        x, y, w, h = cv2.boundingRect(cnt)
        cv2.rectangle(img, (x - 1, y - 1), (x + w + 1, y + h + 1), (0, 255, 0), 2)
        cropped = inverted[y:y + h, x:x + w]

        # skip contours too small to be a digit
        if w < 15 and h < 15:
            continue

        # clean up the crop: Otsu threshold, dilate/erode, pad, resize to 28x28
        cropped = cv2.bitwise_not(cropped)
        thresh = cv2.threshold(cropped, 0, 255,
                               cv2.THRESH_BINARY | cv2.THRESH_OTSU)[1]
        kernel = np.ones((2, 2), np.uint8)
        gray_dilation = cv2.dilate(thresh, kernel, iterations=1)
        gray_erosion = cv2.erode(gray_dilation, kernel, iterations=1)
        gray_erosion = cv2.copyMakeBorder(gray_erosion, top=15, bottom=15,
                                          left=15, right=15,
                                          borderType=cv2.BORDER_CONSTANT,
                                          value=[0, 0, 0])
        the_img = cv2.resize(gray_erosion, (28, 28))
        the_img = np.reshape(the_img, (1, 28, 28, 1))

        # classify the digit and append it to the output string
        probs = model.predict(the_img)
        prediction = probs.argmax(axis=1)
        outDt = outDt + str(prediction[0])

    cv2.imwrite(os.path.join(folder, filename), img)
    K.clear_session()
    return outDt[:2] + '-' + outDt[3:5] + '-' + outDt[6:]
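# sort_contours is called above but never defined in this snippet; it matches
# the helper of the same name from the imutils package. A minimal sketch of a
# bounding-box based implementation (an assumption -- the source may simply
# import it from imutils.contours):
import cv2


def sort_contours(cnts, method="left-to-right"):
    # sort contours by their bounding-box coordinate along the chosen axis
    reverse = method in ("right-to-left", "bottom-to-top")
    i = 1 if method in ("top-to-bottom", "bottom-to-top") else 0
    boundingBoxes = [cv2.boundingRect(c) for c in cnts]
    (cnts, boundingBoxes) = zip(*sorted(zip(cnts, boundingBoxes),
                                        key=lambda b: b[1][i], reverse=reverse))
    return (cnts, boundingBoxes)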
def train_with_noise(amount):
    # initialize the model with the given parameters
    newmodel = LeNet.build(
        width=28, height=28, depth=1, classes=num_classes,
        weightsPath=args["weights"] if args["load_model"] > 0 else None)
    # we use categorical_crossentropy as our loss function
    newmodel.compile(loss="categorical_crossentropy", optimizer=opt,
                     metrics=["accuracy"])

    # load a quarter of the requested noise amount from each noise type
    base_dir = os.path.join(os.path.dirname(__file__), '../datasets/')
    quarter = math.ceil(amount / 4)
    x_train_shapes = np.load(base_dir + 'random_shapes/20shapes/op0.4.npz')['arr_0'][0:quarter]
    x_train_bars = np.load(base_dir + 'bar_noise/op0.4.npz')['arr_0'][0:quarter]
    x_train_pixels = np.load(base_dir + 'random_pixels/op0.4.npz')['arr_0'][0:quarter]
    x_train_numbers = np.load(base_dir + 'number_noise/op0.4.npz')['arr_0'][0:quarter]

    # extend the clean training set with the noisy samples; the noisy images
    # reuse the labels of the clean images they were generated from
    x_train_extended = np.concatenate((x_train, x_train_shapes, x_train_bars,
                                       x_train_pixels, x_train_numbers), axis=0)
    y_train_extended = np.concatenate((y_train, y_train[0:quarter],
                                       y_train[0:quarter], y_train[0:quarter],
                                       y_train[0:quarter]))

    print("[INFO] training with noise: " + str(amount))
    newmodel.fit(x_train_extended, y_train_extended,
                 batch_size=batch_size, epochs=epochs, verbose=1)
    newmodel.save_weights('output/lenet_weights_' + str(amount) + 'noise.hdf5',
                          overwrite=True)
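# A plausible driver for the two noise-experiment functions above: train one
# network per noise level, then evaluate each on the extended test set. The
# noise levels listed here are illustrative, not taken from the source.
for amount in [0, 1000, 2000, 5000, 10000]:
    train_with_noise(amount)

for amount in [0, 1000, 2000, 5000, 10000]:
    test_different_networks(amount)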
def train(aug, trainX, trainY, testX, testY, args):
    # initialize the model
    print("[INFO] compiling model...")
    model = LeNet.build(width=norm_size, height=norm_size, depth=3, classes=CLASS_NUM)
    opt = Adam(lr=INIT_LR, decay=INIT_LR / EPOCHS)
    # opt = Adam(lr=INIT_LR)
    model.compile(loss="categorical_crossentropy", optimizer=opt, metrics=["accuracy"])

    # train the network
    print("[INFO] training network...")
    H = model.fit_generator(aug.flow(trainX, trainY, batch_size=BS),
                            validation_data=(testX, testY),
                            steps_per_epoch=len(trainX) // BS,
                            epochs=EPOCHS, verbose=1)

    # save the model to disk
    print("[INFO] serializing network...")
    model.save(args["model"])

    # plot the training loss and accuracy
    plt.style.use("ggplot")
    plt.figure()
    N = EPOCHS
    plt.plot(np.arange(0, N), H.history["loss"], label="train_loss")
    plt.plot(np.arange(0, N), H.history["val_loss"], label="val_loss")
    plt.plot(np.arange(0, N), H.history["acc"], label="train_acc")
    plt.plot(np.arange(0, N), H.history["val_acc"], label="val_acc")
    plt.title("Training Loss and Accuracy on Invoice classifier")
    plt.xlabel("Epoch #")
    plt.ylabel("Loss/Accuracy")
    plt.legend(loc="lower left")
    plt.savefig(args["plot"])
def train(aug, trainX, trainY, testX, testY, args):
    # initialize the model
    print("[INFO] compiling model...")
    model = LeNet.build(width=NORM_SIZE, height=NORM_SIZE, depth=3, classes=CLASS_NUM)
    opt = Adam(lr=INIT_LR, decay=INIT_LR / EPOCHS)
    model.compile(loss="categorical_crossentropy", optimizer=opt, metrics=["accuracy"])

    # train the network
    print("[INFO] training network...")
    H = model.fit_generator(aug.flow(trainX, trainY, batch_size=BS),
                            validation_data=(testX, testY),
                            steps_per_epoch=len(trainX) // BS,
                            epochs=EPOCHS, verbose=1)

    # save the model to disk
    print("[INFO] serializing network...")
    model.save(args["model"])
                         height_shift_range=0.1, horizontal_flip=True,
                         fill_mode="nearest")

# Construct the set of callbacks
figPath = os.path.sep.join([args["output"], "{}.png".format(os.getpid())])
jsonPath = os.path.sep.join([args["output"], "{}.json".format(os.getpid())])
callbacks = [
    TrainingMonitor(figPath=figPath, jsonPath=jsonPath),
    LearningRateScheduler(poly_decay)
]

# Initialize the optimizer and model
print("[INFO] Compiling model...")
opt = SGD(lr=INIT_LR, momentum=0.9)
model = LeNet.build(width=32, height=32, depth=3, classes=10)
model.compile(loss="categorical_crossentropy", optimizer=opt,
              metrics=["accuracy"])

# Train the network
print("[INFO] training network...")
model.fit_generator(aug.flow(trainX, trainY, batch_size=64),
                    validation_data=(testX, testY),
                    steps_per_epoch=len(trainX) // 64,
                    epochs=NUM_EPOCHS, callbacks=callbacks, verbose=1)

# Evaluate the network
print("[INFO] Evaluating network...")
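# poly_decay is referenced by the LearningRateScheduler above but not shown.
# A minimal sketch of a polynomial learning-rate decay schedule, assuming the
# common linear form (power = 1.0) and the NUM_EPOCHS / INIT_LR globals used
# in that snippet:
def poly_decay(epoch):
    # decay the learning rate linearly from INIT_LR to 0 over NUM_EPOCHS
    maxEpochs = NUM_EPOCHS
    baseLR = INIT_LR
    power = 1.0
    alpha = baseLR * (1 - (epoch / float(maxEpochs))) ** power
    return alpha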
testY = to_categorical(testY, num_classes=len(class_names))

# construct the image generator for data augmentation
aug = ImageDataGenerator(rotation_range=30, width_shift_range=0.1,
                         height_shift_range=0.1, shear_range=0.2,
                         zoom_range=0.2, horizontal_flip=True,
                         fill_mode="nearest")

# initialize the model
print("[INFO] compiling model...")
# model = LeNet.build(width=28, height=28, depth=3, classes=2)  # cnkhanh
model = LeNet.build(width=28, height=28, depth=3, classes=len(class_names))
opt = Adam(lr=INIT_LR, decay=INIT_LR / EPOCHS)
# model.compile(loss="binary_crossentropy", optimizer=opt, metrics=["accuracy"])  # cnkhanh
model.compile(loss="categorical_crossentropy", optimizer=opt, metrics=["accuracy"])
# import tensorflow as tf
# model.add(tf.keras.layers.Dense(len(class_names), activation='softmax'))

# train the network
print("[INFO] training network...")
H = model.fit(x=aug.flow(trainX, trainY, batch_size=BS),
              validation_data=(testX, testY),
              steps_per_epoch=len(trainX) // BS,
              epochs=EPOCHS, verbose=1)
import cv2
from lenet import LeNet
from train import CLASS_NUM

mp = {'[0]': 'sun', '[1]': 'wang', '[2]': 'gong', '[3]': 'xie', '[4]': 'xu'}

gpus = tf.config.experimental.list_physical_devices(device_type='GPU')
for gpu in gpus:
    tf.config.experimental.set_memory_growth(gpu, True)

retinaface = Retinaface()
MODEL = 'ResNet'
# model = load_model("test_sign.model")  ## TODO: TEMP
if MODEL == 'LeNet':
    model = LeNet.build(width=norm_size, height=norm_size, depth=3, classes=CLASS_NUM)
else:
    from keras.applications.resnet50 import ResNet50
    from keras.preprocessing import image
    from keras.models import Model
    from keras.layers import Dense, GlobalAveragePooling2D
    from keras import backend as K

    # build the pre-trained model without its classifier head
    base_model = ResNet50(weights='imagenet', include_top=False)
    # add a global average pooling layer
    x = base_model.output
    x = GlobalAveragePooling2D()(x)
# convert the labels from integers to vectors
trainY = to_categorical(trainY, num_classes=4)
testY = to_categorical(testY, num_classes=4)

# construct the image generator for data augmentation
aug = ImageDataGenerator(rotation_range=30, width_shift_range=0.1,
                         height_shift_range=0.1, shear_range=0.2,
                         zoom_range=0.2, horizontal_flip=False,
                         fill_mode="nearest")

# initialize the model
print("[INFO] compiling model...")
model = LeNet.build(width=image_dim, height=image_dim, depth=3, classes=4)
opt = Adam(lr=INIT_LR, decay=INIT_LR / EPOCHS)
# four one-hot classes call for categorical, not binary, cross-entropy
model.compile(loss="categorical_crossentropy", optimizer=opt, metrics=["accuracy"])

# train the network
print("[INFO] training network...")
H = model.fit_generator(aug.flow(trainX, trainY, batch_size=batch_size),
                        validation_data=(testX, testY),
                        steps_per_epoch=len(trainX) // batch_size,
                        epochs=EPOCHS, verbose=1)

# save the model to disk
print("[INFO] serializing network...")
model.save("trained_model")
# convert the labels from integers to vectors
trainY = to_categorical(trainY, num_classes=4)
testY = to_categorical(testY, num_classes=4)

# construct the image generator for data augmentation
aug = ImageDataGenerator(rotation_range=10, width_shift_range=0.1,
                         height_shift_range=0.1, shear_range=0.2,
                         zoom_range=0.1, horizontal_flip=False,
                         fill_mode="nearest")

# initialize the model
print("[INFO] compiling model...")
model = LeNet.build(width=100, height=100, depth=1, classes=4)
opt = Adam(lr=INIT_LR, decay=INIT_LR / EPOCHS)
# four one-hot classes call for categorical, not binary, cross-entropy
model.compile(loss="categorical_crossentropy", optimizer=opt, metrics=["accuracy"])
model.summary()
plot_model(model, to_file='model.png')

# train the network
print("[INFO] training network...")
H = model.fit_generator(aug.flow(trainX, trainY, batch_size=BS),
                        validation_data=(testX, testY),
                        steps_per_epoch=len(trainX) // BS,
                        epochs=EPOCHS, verbose=1)

# save the model to disk
print(x_train.shape[0], 'train samples')
print(x_test.shape[0], 'test samples')

# create 10 categories (0..9) for the labels
y_train = keras.utils.to_categorical(y_train, num_classes)
y_test = keras.utils.to_categorical(y_test, num_classes)

print("[INFO] compiling model...")
# we use stochastic gradient descent to train our network with a learning rate of 0.01
opt = SGD(lr=0.01)
# initialize the model with the given parameters
model = LeNet.build(
    width=28, height=28, depth=1, classes=num_classes,
    weightsPath=args["weights"] if args["load_model"] > 0 else None)
# we use categorical_crossentropy as our loss function
model.compile(loss="categorical_crossentropy", optimizer=opt, metrics=["accuracy"])

if args["load_model"] < 0:
    print("[INFO] training ...")
    model.fit(x_train, y_train, batch_size=batch_size, epochs=epochs, verbose=1)
X_test /= test_std

# encode the string labels as integers, then one-hot vectors; the encoder
# fitted on the training labels is reused for the test labels so the
# class-to-index mapping stays consistent across the two splits
encoder = LabelEncoder()
encoder.fit(y_train)
y_train = encoder.transform(y_train)
y_test = encoder.transform(y_test)
Y_train = np_utils.to_categorical(y_train, num_classes)
Y_test = np_utils.to_categorical(y_test, num_classes)

# initialize the model
print("[INFO] compiling model...")
model = LeNet.build(width=width, height=height, depth=depth, classes=num_classes)
opt = Adam(lr=INIT_LR, decay=INIT_LR / EPOCHS)
# binary cross-entropy is only appropriate here when num_classes == 2
model.compile(loss="binary_crossentropy", optimizer=opt, metrics=["accuracy"])

# train the network
print("[INFO] training network...")
H = model.fit(X_train, Y_train, batch_size=BS, epochs=EPOCHS, verbose=1,
              validation_split=0.1)

# save the model to disk
print("[INFO] serializing network...")
model.save('model.h5')
# scale data to the range of [0, 1]
trainData = trainData.astype('float32') / 255.0
testData = testData.astype('float32') / 255.0

# transform the training and testing labels into vectors
# in the range [0, classes]
trainLabels = np_utils.to_categorical(trainLabels, 10)
testLabels = np_utils.to_categorical(testLabels, 10)

################################################
# Part 2: build and train the model
# initialize the optimizer and model
print('[INFO] compiling model...')
opt = SGD(lr=LR)
model = LeNet.build(28, 28, 10, '', 1)
model.compile(loss='categorical_crossentropy', optimizer=opt,
              metrics=['accuracy'])

# train the model
print('[INFO] training model...')
H = model.fit(trainData, trainLabels,
              validation_data=(testData, testLabels),
              batch_size=BATCH_SIZE, epochs=EPOCHS, verbose=1)

################################################
# Part 3: evaluate the model
def train(aug, trainX, trainY, testX, testY, args):
    # initialize the model
    print("[INFO] compiling model...")
    # alternative backbones kept for reference:
    # base_model = VGG19(include_top=False, weights=None, input_shape=(128, 128, 3), classes=5)
    # base_model = InceptionV3(weights='imagenet', include_top=False, input_shape=(150, 150, 3), classes=5)
    # x = base_model.output
    # x = GlobalAveragePooling2D()(x)          # add a global spatial average pooling layer
    # x = Dense(1024, activation='relu')(x)    # add a fully-connected layer
    # predictions = Dense(5, activation='softmax')(x)  # and a logistic layer
    # model = Model(inputs=base_model.input, outputs=predictions)
    # model.compile(optimizer='rmsprop', loss='categorical_crossentropy', metrics=["accuracy"])

    model = LeNet.build(width=norm_size, height=norm_size, depth=3, classes=CLASS_NUM)
    # opt = Adam(lr=INIT_LR, decay=INIT_LR / EPOCHS)
    model.compile(loss="categorical_crossentropy", optimizer='adam',
                  metrics=["accuracy"])

    # train the network
    print("[INFO] training network...")
    H = model.fit_generator(aug.flow(trainX, trainY, batch_size=BS),
                            validation_data=(testX, testY),
                            steps_per_epoch=len(trainX) // BS,
                            epochs=EPOCHS, verbose=1)

    # ResNet model (alternative):
    # model = resnest.ResnetBuilder.build_resnet_34((3, norm_size, norm_size), CLASS_NUM)
    # model.compile(loss='categorical_crossentropy', optimizer='adam', metrics=['accuracy'])
    # H = model.fit(trainX, trainY, batch_size=BS, nb_epoch=EPOCHS,
    #               validation_data=(testX, testY), shuffle=True,
    #               class_weight='auto',
    #               callbacks=[lr_reducer, early_stopper, csv_logger])

    # model = make_network()
    # model.compile(loss='categorical_crossentropy', optimizer='rmsprop', metrics=['accuracy'])
    # H = model.fit(trainX, trainY, batch_size=BS, validation_data=(testX, testY),
    #               epochs=EPOCHS, verbose=1, shuffle=True)

    # save the model to disk
    print("[INFO] serializing network...")
    model.save(args)
    print(model.summary(), '\n')

    # evaluate on the held-out test data
    scores = model.evaluate(testX, testY)
    print(scores)
    print("model accuracy: {:.2f}".format(scores[1]))

    # plot the training loss and accuracy
    plt.style.use("ggplot")
    plt.figure()
    N = EPOCHS
    plt.plot(np.arange(0, N), np.array(H.history["loss"]), label="train_loss")
    plt.plot(np.arange(0, N), np.array(H.history["val_loss"]), label="val_loss")
    plt.plot(np.arange(0, N), np.array(H.history["acc"]), label="train_acc")
    plt.plot(np.arange(0, N), np.array(H.history["val_acc"]), label="val_acc")
    plt.title("Training Loss and Accuracy on traffic-sign classifier")
    plt.xlabel("Epoch #")
    plt.ylabel("Loss/Accuracy")
    plt.legend(loc="lower left")
    plt.savefig("test.png")
    plt.show()
    return model
# the data for training and the remaining 25% for testing
(trainX, testX, trainY, testY) = train_test_split(data, labels,
                                                  test_size=0.25,
                                                  random_state=42)

# convert the labels from integers to vectors
trainY = to_categorical(trainY, num_classes=2)
testY = to_categorical(testY, num_classes=2)

# construct the image generator for data augmentation
aug = ImageDataGenerator(rotation_range=30, width_shift_range=0.1,
                         height_shift_range=0.1, shear_range=0.2,
                         zoom_range=0.2, horizontal_flip=True,
                         fill_mode="nearest")

# initialize the model
print("[INFO] compiling model...")
model = LeNet.build(width=150, height=150, depth=3, classes=2)
opt = Adam(lr=INIT_LR, decay=INIT_LR / EPOCHS)
# pass the configured optimizer object; the string 'Adam' would ignore
# the learning rate and decay set above
model.compile(loss="binary_crossentropy", optimizer=opt, metrics=["accuracy"])

# train the network
print("[INFO] training network...")
H = model.fit_generator(aug.flow(trainX, trainY, batch_size=BS),
                        validation_data=(testX, testY),
                        steps_per_epoch=len(trainX) // BS,
                        epochs=EPOCHS, verbose=1)

# save the model to disk
print("[INFO] serializing network...")
model.save(args["model"])

# plot the training loss and accuracy
pic_shape = (84, 84, 3)
opt = SGD(lr=0.0001)
n_outputs = 4
train = True  # if False, load parameters and run validation
precise_evaluation = True

print("Running Experiment: ")
tbCallBack = keras.callbacks.TensorBoard(log_dir=log_path, histogram_freq=0,
                                         write_graph=True, write_images=False)

print("[INFO] compiling model...")
model_MatFra = LeNet.build(pic_shape[0], pic_shape[1], pic_shape[2],
                           outputs=n_outputs, mode=1,
                           weightsPath=args["weights"] if args["load_model"] > 0 else None)
model_MatFra.compile(loss="mean_squared_error", optimizer=opt,
                     metrics=["accuracy"])
print(model_MatFra.summary())
model_MatFra.load_weights("/home/borg/SabBido/NN_param/checkpoint")
graph = tfg.get_default_graph()

bridge = CvBridge()
rospy.init_node('live_error_test', anonymous=True)
rospy.loginfo("Node initialized")

# initialize the testing class
test_hsv = Tester()
dataset.target.astype("int"), test_size=0.33) # transform the training and testing labels into vectors in the # range [0, classes] -- this generates a vector for each label, # where the index of the label is set to `1` and all other entries # to `0`; in the case of MNIST, there are 10 class labels trainLabels = np_utils.to_categorical(trainLabels, 10) testLabels = np_utils.to_categorical(testLabels, 10) # initialize the optimizer and model print("[INFO] compiling model...") opt = SGD(lr=0.01) model = LeNet.build(width=28, height=28, depth=1, classes=10, weightsPath=weightsPath) model.compile(loss="categorical_crossentropy", optimizer=opt, metrics=["accuracy"]) # if no weights specified train the model if weightsPath is None: print("[INFO] training...") model.fit(trainData, trainLabels, batch_size=128, nb_epoch=20, verbose=1) # show the accuracy on the testing set print("[INFO] evaluating...") (loss, accuracy) = model.evaluate(testData, testLabels,
# convert labels from integers to vectors
trainY = to_categorical(trainY, num_classes=2)
testY = to_categorical(testY, num_classes=2)

# construct the image generator for data augmentation
aug = ImageDataGenerator(rotation_range=30, width_shift_range=0.1,
                         height_shift_range=0.1, shear_range=0.2,
                         zoom_range=0.2, horizontal_flip=True,
                         fill_mode="nearest")

# initialize the model
print("[INFO] compiling model...")
model = LeNet.build(width=28, height=28, depth=3, classes=2)
opt = Adam(lr=INIT_LR, decay=INIT_LR / EPOCHS)
model.compile(loss='binary_crossentropy', optimizer=opt, metrics=['accuracy'])

# train the network
print('[INFO] training network...')
H = model.fit_generator(aug.flow(trainX, trainY, batch_size=BS),
                        validation_data=(testX, testY),
                        steps_per_epoch=len(trainX) // BS,
                        epochs=EPOCHS, verbose=1)

print('[INFO] serializing network...')
model.save('mymodel.h5')

plt.style.use("ggplot")
testData = testData.reshape((testData.shape[0], 28, 28, 1))

# scale data to the range of [0, 1]
trainData = trainData.astype("float32") / 255.0
testData = testData.astype("float32") / 255.0

# process labels for the categorical cross-entropy loss function
trainLabels = np_utils.to_categorical(trainLabels, 10)
testLabels = np_utils.to_categorical(testLabels, 10)

# initialize the optimizer and model
print("[INFO] compiling model...")
opt = SGD(lr=0.01)
model = LeNet.build(
    numChannels=1, imgRows=28, imgCols=28, numClasses=10,
    weightsPath=args["weights"] if args["load_model"] > 0 else None)
model.compile(loss="categorical_crossentropy", optimizer=opt,
              metrics=["accuracy"])

# train and evaluate the model only if not loading a pre-existing model
if args["load_model"] < 0:
    print("[INFO] training...")
    model.fit(trainData, trainLabels, batch_size=128, epochs=20, verbose=1)

    # print the accuracy on the testing set
    print("[INFO] evaluating...")
    (loss, accuracy) = model.evaluate(testData, testLabels,
if K.image_data_format() == 'channels_first':
    data = data.reshape(data.shape[0], 1, 28, 28)
else:
    data = data.reshape(data.shape[0], 28, 28, 1)

(X_train, X_test, y_train, y_test) = train_test_split(
    data, dataset.target.astype('int'), test_size=0.25)
X_train = X_train.astype('float') / 255
X_test = X_test.astype('float') / 255

lb = LabelBinarizer()
y_train = lb.fit_transform(y_train)
y_test = lb.transform(y_test)

model = LeNet.build(28, 28, 1, 10)
sgd = SGD()
model.compile(optimizer=sgd, loss='categorical_crossentropy', metrics=['accuracy'])
H = model.fit(X_train, y_train, batch_size=64, epochs=20,
              validation_data=(X_test, y_test))

# save and load model
# model.save('./model/lenet_mnist_weight.hdf5')
# model = load_model('./model/lenet_mnist_weight.hdf5')
# model.predict....

# graph of loss and accuracy
fig = plt.figure()
plt.plot(np.arange(0, 20), H.history['loss'], label='training loss')
plt.plot(np.arange(0, 20), H.history['val_loss'], label='validation loss')
plt.plot(np.arange(0, 20), H.history['acc'], label='training accuracy')
plt.plot(np.arange(0, 20), H.history['val_acc'], label='validation accuracy')
 valid_labels) = train_test_split(data, labels, test_size=0.25)

train_labels = to_categorical(train_labels, num_classes=no_classes)
valid_labels = to_categorical(valid_labels, num_classes=no_classes)

aug = ImageDataGenerator(rotation_range=30, width_shift_range=0.1,
                         height_shift_range=0.1, shear_range=0.2,
                         zoom_range=0.2, horizontal_flip=True,
                         fill_mode="nearest")

print("[INFO] compiling model...")
model = LeNet.build(width=RESIZE, height=RESIZE, depth=IMG_DEPTH,
                    classes=no_classes)
opt = Adam(lr=INIT_LR, decay=INIT_LR / epochs)

# choose the loss to match the number of classes
if no_classes == 2:
    model.compile(loss="binary_crossentropy", optimizer=opt,
                  metrics=["accuracy"])
else:
    model.compile(loss="categorical_crossentropy", optimizer=opt,
                  metrics=["accuracy"])

print("[INFO] training network...")
H = model.fit_generator(aug.flow(train_data, train_labels, batch_size=BS),
                        validation_data=(valid_data, valid_labels),
                        steps_per_epoch=len(train_data) // BS,
# convert the labels from integers to vectors
trainY = to_categorical(trainY, num_classes=6)
testY = to_categorical(testY, num_classes=6)

# construct the image generator for data augmentation
aug = ImageDataGenerator(rotation_range=30, width_shift_range=0.1,
                         height_shift_range=0.1, shear_range=0.2,
                         zoom_range=0.2, horizontal_flip=True,
                         fill_mode="nearest")

# initialize the model
print("[INFO] compiling model...")
model = LeNet.build(width=width, height=height, depth=3, classes=6)
opt = Adam(lr=INIT_LR, decay=INIT_LR / EPOCHS)
model.compile(loss="categorical_crossentropy", optimizer=opt,
              metrics=["accuracy"])

# train the network
print("[INFO] training network...")
H = model.fit_generator(aug.flow(trainX, trainY, batch_size=BS),
                        validation_data=(testX, testY),
                        steps_per_epoch=len(trainX) // BS,
                        epochs=EPOCHS, verbose=1)

# save the model to disk
print("[INFO] serializing network...")
X_test = X_test.astype('float32')
X_train /= 255
X_test /= 255

# we need a 60k x [1 x 28 x 28] shape as input to the CONVNET
X_train = X_train[:, np.newaxis, :, :]
X_test = X_test[:, np.newaxis, :, :]
print(X_train.shape[0], 'train samples')
print(X_test.shape[0], 'test samples')

# convert class vectors to binary class matrices
y_train = np_utils.to_categorical(y_train, NB_CLASSES)
y_test = np_utils.to_categorical(y_test, NB_CLASSES)

# initialize the optimizer and model
model = LeNet.build(input_shape=INPUT_SHAPE, classes=NB_CLASSES)
model.compile(loss="categorical_crossentropy", optimizer=OPTIMIZER,
              metrics=['accuracy'])

history = model.fit(X_train, y_train, batch_size=BATCH_SIZE,
                    epochs=NB_EPOCH, verbose=VERBOSE,
                    validation_split=VALIDATION_SPLIT)

score = model.evaluate(X_test, y_test, verbose=VERBOSE)
print("Test score:", score[0])
print("Test accuracy:", score[1])

# list all data in history
print(history.history.keys())
# shuffle the dataset
x, y = shuffle(sketch_data, Y, random_state=10)
print("x :", x)
print("y : ", y)

# split the dataset
X_train, X_test, Y_train, Y_test = train_test_split(x, y, test_size=0.2)
# print("X_train: ", X_train)
# print("X_test: ", X_test)
# print("Y_train: ", Y_train)
# print("Y_test: ", Y_test)

# initialize the model
print("[INFO] compiling model...")
model = LeNet.build(width=ROWS, height=COLS, depth=CHANNELS,
                    classes=num_of_classes)
opt = Adam(lr=INIT_LR, decay=INIT_LR / EPOCHS)
model.compile(loss=categorical_crossentropy, optimizer=opt,
              metrics=['accuracy'])

# train the network
print("[INFO] training network...")
hist = model.fit(X_train, Y_train, batch_size=BS, epochs=EPOCHS, verbose=1,
                 validation_data=(X_test, Y_test))
# convert the labels from integers to vectors
trainY = to_categorical(trainY, num_classes=2)
testY = to_categorical(testY, num_classes=2)

# construct the image generator for data augmentation
aug = ImageDataGenerator(rotation_range=30, width_shift_range=0.1,
                         height_shift_range=0.1, shear_range=0.2,
                         zoom_range=0.2, horizontal_flip=True,
                         fill_mode="nearest")

# initialize the model
print("[INFO] compiling model...")
model = LeNet.build(width=image_size, height=image_size, depth=3, classes=2)
opt = Adam(lr=learning_rate, decay=learning_rate / epochs)
model.compile(loss="binary_crossentropy", optimizer=opt, metrics=["accuracy"])

# train the network
print("[INFO] training network...")
H = model.fit_generator(aug.flow(trainX, trainY, batch_size=batch_size),
                        validation_data=(testX, testY),
                        steps_per_epoch=len(trainX) // batch_size,
                        epochs=epochs, verbose=1)

# save the model to disk
print("[INFO] serializing network...")
model.save(args["model"])
# construct the training image generator for data augmentation
aug = ImageDataGenerator(rotation_range=20, zoom_range=0.15,
                         width_shift_range=0.2, height_shift_range=0.2,
                         shear_range=0.15, horizontal_flip=True,
                         fill_mode="nearest")

# initialize the optimizer and model
print("[INFO] compiling model...")
# opt = SGD(lr=0.01)
opt = Adam(lr=1e-4, decay=1e-4 / args["epochs"])
model = LeNet.build(
    numChannels=1, width=32, height=32, numClasses=62,
    weightsPath=args["weights"] if args["load_model"] > 0 else None)
model.compile(loss="categorical_crossentropy", optimizer=opt,
              metrics=["accuracy"])
print(model.summary())

# only train and evaluate the model if we *are not* loading a
# pre-existing model
if args["load_model"] < 0:
    print("[INFO] training...")
    # H = model.fit(trainX, trainY, batch_size=128, epochs=args["epochs"],
    #               verbose=1)
    H = model.fit_generator(
    data = data.reshape(data.shape[0], 1, 28, 28)
else:
    data = data.reshape(data.shape[0], 28, 28, 1)

(trainX, testX, trainY, testY) = train_test_split(data / 255.0,
                                                  dataset.target.astype("int"),
                                                  test_size=0.25,
                                                  random_state=42)

lb = LabelBinarizer()
trainY = lb.fit_transform(trainY)
testY = lb.transform(testY)

print("[INFO] compiling model...")
opt = SGD(lr=0.01)
model = LeNet.build(width=28, height=28, depth=1, classes=10)
model.compile(loss="categorical_crossentropy", optimizer=opt,
              metrics=["accuracy"])

print("[INFO] training network...")
H = model.fit(trainX, trainY, validation_data=(testX, testY),
              batch_size=128, epochs=20, verbose=1)

print("[INFO] evaluating network...")
predictions = model.predict(testX, batch_size=128)
print(
# dimensions of our images
img_width, img_height = 150, 150

train_data_dir = os.path.join(args['dataset'], 'train')
validation_data_dir = os.path.join(args['dataset'], 'validation')
test_data_dir = os.path.join(args['dataset'], 'minitest')
nb_epoch = 100
nb_train_samples = 2000
nb_validation_samples = 800

class_labels = os.listdir(train_data_dir)
class_labels.sort()
clog.info('Classes: {}'.format(class_labels))

# initialize the optimizer and model
clog.info('Initializing model...')
model = LeNet.build(width=img_width, height=img_height, depth=3,
                    num_classes=2,
                    weights_path=args['weights_file'] if args['load_model'] > 0 else None)
model.compile(loss='categorical_crossentropy', optimizer='rmsprop',
              metrics=['accuracy'])

# only train and evaluate the model if we *are not* loading a
# pre-existing model
if not args['load_model']:
    # this is the augmentation configuration we will use for training
    train_datagen = ImageDataGenerator(
        rescale=1. / 255,
        rotation_range=40,
        width_shift_range=0.2,
        height_shift_range=0.2,
        shear_range=0.2,
        zoom_range=0.2,
        horizontal_flip=True)
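# A hedged sketch of how the train_datagen above would typically be consumed;
# the generator name, batch size, and target_size below are assumptions, not
# taken from the source. flow_from_directory infers the labels from the class
# subfolders of train_data_dir.
train_generator = train_datagen.flow_from_directory(
    train_data_dir,
    target_size=(img_width, img_height),
    batch_size=32,
    class_mode='categorical')

model.fit_generator(
    train_generator,
    steps_per_epoch=nb_train_samples // 32,
    epochs=nb_epoch)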