def RecognizeImg(model, mean, FileName=None):
    """Classify a single image file with a trained model.

    Args:
        model: trained model exposing ``predict()`` (tflearn.DNN-compatible).
        mean: per-feature mean produced by ``du.featurewise_zero_center``
            at training time; must be the same mean used on the train set.
        FileName: path of the image to classify; defaults to
            ``FLAGS.targetfile`` resolved at call time.

    Returns:
        The raw prediction from ``model.predict`` (class probabilities).
    """
    # FIX: the original signature used `FileName=FLAGS.targetfile`, which
    # evaluates the flag at import time — before command-line flags are
    # parsed — freezing a stale value. Resolve lazily instead; callers that
    # pass FileName explicitly are unaffected.
    if FileName is None:
        FileName = FLAGS.targetfile
    X = DirectReadImg(FileName)
    # Apply the SAME training-time mean so inference matches training.
    X = du.featurewise_zero_center(X, mean)
    pred = model.predict([X[0]])
    print(pred)
    return pred
def train(X_train_R, Y_train, X_test_R, Y_test, predict_data):
    """Train a small residual network on digit data, save/reload it, and
    print a prediction for ``predict_data``.

    Args:
        X_train_R, Y_train: training images and one-hot labels.
        X_test_R, Y_test: validation images and one-hot labels.
        predict_data: batch of images to run through the trained model.
    """
    # Zero-center both splits with the mean computed on the training set.
    X_train_R, train_mean = du.featurewise_zero_center(X_train_R)
    X_test_R = du.featurewise_zero_center(X_test_R, train_mean)

    # Residual bottleneck network: 28x28x1 input -> 10-way softmax.
    network = tflearn.input_data(shape=[None, 28, 28, 1])
    network = tflearn.conv_2d(network, 64, 3, activation='relu', bias=False)
    network = tflearn.residual_bottleneck(network, 3, 16, 64)
    network = tflearn.residual_bottleneck(network, 1, 32, 128, downsample=True)
    network = tflearn.residual_bottleneck(network, 2, 32, 128)
    network = tflearn.residual_bottleneck(network, 1, 64, 256, downsample=True)
    network = tflearn.residual_bottleneck(network, 2, 64, 256)
    network = tflearn.batch_normalization(network)
    network = tflearn.activation(network, 'relu')
    network = tflearn.global_avg_pool(network)
    network = tflearn.fully_connected(network, 10, activation='softmax')
    network = tflearn.regression(network, optimizer='momentum',
                                 loss='categorical_crossentropy',
                                 learning_rate=0.1)

    digit_model = tflearn.DNN(network, checkpoint_path='model_Digit',
                              max_checkpoints=2, tensorboard_verbose=0,
                              tensorboard_dir='logs')
    digit_model.fit(X_train_R, Y_train, n_epoch=1,
                    validation_set=(X_test_R, Y_test),
                    show_metric=True, batch_size=10, run_id='Digit')

    # Persist the trained weights, then reload them to confirm round-trip.
    digit_model.save("model_Digit/digit_recognition")
    print("model train OK!\n save model OK!")
    digit_model.load("model_Digit/digit_recognition")
    print("model already loaded!")

    print("model predict")
    label = digit_model.predict(predict_data)
    print("predict label: ", label)
def demo():
    """Train the reference residual-bottleneck network on MNIST.

    Loads MNIST, zero-centers it, builds the same architecture as
    ``train``, and fits for 100 epochs with TensorBoard checkpoints under
    ``model_resnet_mnist``.
    """
    import tflearn.datasets.mnist as mnist
    # FIX: the original passed one_hot='True' (a string). Any non-empty
    # string is truthy — even 'False' would enable one-hot — so pass the
    # actual boolean. Behavior here is identical, but the type is correct.
    x, y, test_x, test_y = mnist.load_data(one_hot=True)
    print(x.shape)
    x = x.reshape([-1, 28, 28, 1])
    test_x = test_x.reshape([-1, 28, 28, 1])

    # featurewise_zero_center subtracts the given mean from every sample;
    # if no mean is specified it is computed over all samples.
    # Returns: an array with the same shape as the input, or a tuple
    # (array, mean) if no mean value was specified.
    x, mean = du.featurewise_zero_center(x)
    test_x = du.featurewise_zero_center(test_x, mean)

    net = tflearn.input_data(shape=[None, 28, 28, 1])
    net = tflearn.conv_2d(net, 64, 3, activation='relu', bias=False)
    net = tflearn.residual_bottleneck(net, 3, 16, 64)
    net = tflearn.residual_bottleneck(net, 1, 32, 128, downsample=True)
    net = tflearn.residual_bottleneck(net, 2, 32, 128)
    net = tflearn.residual_bottleneck(net, 1, 64, 256, downsample=True)
    net = tflearn.residual_bottleneck(net, 2, 64, 256)
    net = tflearn.batch_normalization(net)
    net = tflearn.activation(net, 'relu')
    net = tflearn.global_avg_pool(net)
    # Regression
    net = tflearn.fully_connected(net, 10, activation='softmax')
    net = tflearn.regression(net, optimizer='momentum',
                             loss='categorical_crossentropy',
                             learning_rate=0.1)
    # Training
    model = tflearn.DNN(net, checkpoint_path='model_resnet_mnist',
                        max_checkpoints=10, tensorboard_verbose=0)
    model.fit(x, y, n_epoch=100, validation_set=(test_x, test_y),
              show_metric=True, batch_size=256, run_id='resnet_mnist')
def RecognizeImg(model, mean, FileName=None):
    """Classify a single image file, sizing it per the active model type.

    Args:
        model: trained model exposing ``predict()`` (tflearn.DNN-compatible).
        mean: per-feature mean from training-time zero-centering.
        FileName: path of the image; defaults to ``FLAGS.targetfile``
            resolved at call time.

    Returns:
        The raw prediction from ``model.predict``.
    """
    # FIX: `FileName=FLAGS.targetfile` as a default is evaluated once at
    # import time, before flags are parsed. Resolve the flag lazily.
    if FileName is None:
        FileName = FLAGS.targetfile
    imgsize = FLAGS.sample_size
    # Model type 1 expects fixed 32x32 input regardless of sample_size.
    if FLAGS.modeltype == 1:
        imgsize = 32
    X = DirectReadImg(FileName, Imgheight=imgsize, Imgwidth=imgsize,
                      normlized=True)
    # Apply the same training-time mean before predicting.
    X = du.featurewise_zero_center(X, mean)
    pred = model.predict([X[0]])
    print(pred)
    return pred
trainx = [] path_train = [] for img in tqdm(os.listdir("C:/Users/aviga/Desktop/Train_5_letters")): path = os.path.join("C:/Users/aviga/Desktop/Train_5_letters", img) path_train.append(path) sort_path_train = sorted(path_train, key=sort_internet_dataset) for path in path_train: img = Image.open(path) img = resizeimage.resize_cover(img, [32, 32]) img = np.asarray(img) trainx.append(img) trainx = np.asarray(trainx) trainx = trainx.reshape([-1, 32, 32, 1]) trainx, mean1 = du.featurewise_zero_center(trainx) # train_y trainy = pd.read_csv("C:/Users/aviga/Desktop/y_Train_5_letters.csv", header=None) trainy = trainy.values.astype('int32') - 1 trainy = to_categorical(trainy, 30) # train2 trainx2 = [] path_train_2 = [] for img in tqdm(os.listdir("C:/Users/aviga/Desktop/train")): path = os.path.join("C:/Users/aviga/Desktop/train", img) path_train_2.append(path) sort_path_train_2 = sorted(path_train_2, key=sort_our_dataset) for path in sort_path_train_2:
Links:
    - [Deep Residual Network](http://arxiv.org/pdf/1512.03385.pdf)
    - [CIFAR-10 Dataset](https://www.cs.toronto.edu/~kriz/cifar.html)
"""
from __future__ import division, print_function, absolute_import

import tflearn
import tflearn.data_utils as du

# Data loading
from tflearn.datasets import cifar10
(X, Y), (testX, testY) = cifar10.load_data()

# Data pre-processing: zero-center and std-normalize the training set,
# then apply the SAME train-set statistics (mean, std) to the test set
# so the two splits share one input distribution.
X, mean = du.featurewise_zero_center(X)
X, std = du.featurewise_std_normalization(X)
testX = du.featurewise_zero_center(testX, mean)
testX = du.featurewise_std_normalization(testX, std)
# One-hot encode the 10 CIFAR-10 class labels.
Y = du.to_categorical(Y, 10)
testY = du.to_categorical(testY, 10)

# Building Residual Network (shallow residual blocks, 32x32x3 RGB input).
net = tflearn.input_data(shape=[None, 32, 32, 3])
net = tflearn.conv_2d(net, 32, 3)
net = tflearn.batch_normalization(net)
net = tflearn.activation(net, 'relu')
net = tflearn.shallow_residual_block(net, 4, 32, regularizer='L2')
# downsample=True halves the spatial resolution between block groups.
net = tflearn.shallow_residual_block(net, 1, 32, downsample=True,
                                     regularizer='L2')
net = tflearn.shallow_residual_block(net, 4, 64, regularizer='L2')
    # Cached mean found on disk — no need to recompute from images.
    print('mean pickle is here!')
else:
    # Load mean from original image files:
    dataset_file = 'trainfilelist.txt'
    # Build the preloader array, resize images to sample size
    from tflearn.data_utils import image_preloader
    X, Y = image_preloader(dataset_file,
                           image_shape=(FLAGS.sample_size, FLAGS.sample_size),
                           mode='file', categorical_labels=True,
                           normalize=True)
    # Reshape X to NHWC with a single (grayscale) channel.
    X = np.array(X)
    X = X.reshape([-1, FLAGS.sample_size, FLAGS.sample_size, 1])
    # Compute the dataset mean (X itself is discarded; only `mean` is kept).
    X, mean = du.featurewise_zero_center(X)
    # write to pickle file so subsequent runs can skip the recompute above
    pkl_file = open('Inceptionmean.pkl', 'wb')
    pickle.dump(mean, pkl_file)
    pkl_file.close()

# load resnet model
# NOTE(review): create_Inception(3) — presumably 3 is the class/channel
# count; confirm against its definition elsewhere in the project.
network = create_Inception(3)
model = tflearn.DNN(network)
model.load(FLAGS.modelfile)
# filemode: classify a single target file; otherwise evaluate every
# 'jpeg' file in the input directory.
if(FLAGS.filemode):
    RecognizeImg(model, mean, FLAGS.targetfile)
else:
    DirectoryFileEnvaluation(model, mean, FLAGS.inputpath, 'jpeg')