def alexnet(width, height, lr, output=3):
    network = input_data(shape=[None, width, height, 1], name='input')
    network = conv_2d(network, 96, 11, strides=4, activation='relu')
    network = max_pool_2d(network, 3, strides=2)
    network = local_response_normalization(network)
    network = conv_2d(network, 256, 5, activation='relu')
    network = max_pool_2d(network, 3, strides=2)
    network = local_response_normalization(network)
    network = conv_2d(network, 384, 3, activation='relu')
    network = conv_2d(network, 384, 3, activation='relu')
    network = conv_2d(network, 256, 3, activation='relu')
    network = max_pool_2d(network, 3, strides=2)
    network = local_response_normalization(network)
    network = fully_connected(network, 4096, activation='tanh')
    network = dropout(network, 0.5)
    network = fully_connected(network, 4096, activation='tanh')
    network = dropout(network, 0.5)
    network = fully_connected(network, output, activation='softmax')
    network = regression(network, optimizer='momentum',
                         loss='categorical_crossentropy',
                         learning_rate=lr, name='targets')
    model = tflearn.DNN(network, checkpoint_path='model_alexnet',
                        max_checkpoints=1, tensorboard_verbose=0,
                        tensorboard_dir='log')
    return model
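# A minimal usage sketch for alexnet() above. The frame size, learning rate,
# and dummy arrays are illustrative assumptions, not from the original source;
# the dict keys match the 'input'/'targets' names set in the builder.
import numpy as np

model = alexnet(width=80, height=60, lr=1e-3, output=3)
X_dummy = np.zeros((8, 80, 60, 1), dtype=np.float32)   # 8 grayscale frames
Y_dummy = np.eye(3, dtype=np.float32)[[0] * 8]         # one-hot labels
model.fit({'input': X_dummy}, {'targets': Y_dummy}, n_epoch=1, show_metric=True)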
def build_network(self):
    # Building 'AlexNet'
    # https://github.com/tflearn/tflearn/blob/master/examples/images/alexnet.py
    # https://github.com/DT42/squeezenet_demo
    # https://github.com/yhenon/pysqueezenet/blob/master/squeezenet.py
    print('[+] Building CNN')
    self.network = input_data(shape=[None, SIZE_FACE, SIZE_FACE, 1])
    self.network = conv_2d(self.network, 96, 11, strides=4, activation='relu')
    self.network = max_pool_2d(self.network, 3, strides=2)
    self.network = local_response_normalization(self.network)
    self.network = conv_2d(self.network, 256, 5, activation='relu')
    self.network = max_pool_2d(self.network, 3, strides=2)
    self.network = local_response_normalization(self.network)
    self.network = conv_2d(self.network, 256, 3, activation='relu')
    self.network = max_pool_2d(self.network, 3, strides=2)
    self.network = local_response_normalization(self.network)
    self.network = fully_connected(self.network, 1024, activation='tanh')
    self.network = dropout(self.network, 0.5)
    self.network = fully_connected(self.network, 1024, activation='tanh')
    self.network = dropout(self.network, 0.5)
    self.network = fully_connected(self.network, len(EMOTIONS),
                                   activation='softmax')
    self.network = regression(self.network, optimizer='momentum',
                              loss='categorical_crossentropy')
    self.model = tflearn.DNN(
        self.network,
        checkpoint_path=SAVE_DIRECTORY + '/alexnet_mood_recognition',
        max_checkpoints=1,
        tensorboard_verbose=2
    )
    self.load_model()
def train_nmf_network(mfcc_array, sdr_array, n_epochs, take):
    """
    Train a CNN that predicts SDR values from MFCC cluster features.

    :param mfcc_array: array of MFCC feature matrices, shape [n, 13, 100, 1]
    :param sdr_array: array of target SDR values
    :param n_epochs: number of training epochs
    :param take: index of this training run (used in the run id)
    :return: the trained tflearn.DNN model
    """
    with tf.Graph().as_default():
        network = input_data(shape=[None, 13, 100, 1])
        network = conv_2d(network, 32, [5, 5], activation="relu", regularizer="L2")
        network = max_pool_2d(network, 2)
        network = conv_2d(network, 64, [5, 5], activation="relu", regularizer="L2")
        network = max_pool_2d(network, 2)
        network = fully_connected(network, 128, activation="relu")
        network = dropout(network, 0.8)
        network = fully_connected(network, 256, activation="relu")
        network = dropout(network, 0.8)
        network = fully_connected(network, 1, activation="linear")
        regress = tflearn.regression(network, optimizer="rmsprop",
                                     loss="mean_square", learning_rate=0.001)

        # Training
        model = tflearn.DNN(regress)  # , session=sess)
        model.fit(
            mfcc_array,
            sdr_array,
            n_epoch=n_epochs,
            snapshot_step=1000,
            show_metric=True,
            run_id="repet_choice_{0}_epochs_take_{1}".format(n_epochs, take),
        )
        return model
def build_model_anything_happening():
    ### IS ANY OF THIS NECESSARY FOR LIGHT/DARK? IN GENERAL WITH A STATIONARY CAMERA?
    img_prep = ImagePreprocessing()
    img_prep.add_featurewise_zero_center()
    img_prep.add_featurewise_stdnorm()

    img_aug = ImageAugmentation()
    img_aug.add_random_flip_leftright()

    # Specify the shape of the data, image prep
    network = input_data(shape=[None, 52, 64],
                         data_preprocessing=img_prep,
                         data_augmentation=img_aug)

    # Since the image position remains consistent and the images are fairly
    # similar, the network can be spatially aware. A fully connected network
    # is used directly; no convolution is needed.
    network = fully_connected(network, 2048, activation='relu')
    network = fully_connected(network, 2, activation='softmax')
    network = regression(network, optimizer='adam',
                         loss='categorical_crossentropy',
                         learning_rate=0.00003)
    model = tflearn.DNN(network, tensorboard_verbose=0)
    return model
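# A minimal sketch of how the builder above might be used. The arrays below
# are illustrative stand-ins, not from the original source.
import numpy as np

model = build_model_anything_happening()
X = np.random.rand(16, 52, 64).astype(np.float32)   # 16 grayscale frames
Y = np.eye(2, dtype=np.float32)[np.random.randint(0, 2, 16)]
model.fit(X, Y, n_epoch=1, validation_set=0.1, show_metric=True)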
def train_repet_network(beat_spectrum_array, sdr_array, n_epochs, take):
    """
    Train a 1D CNN that predicts SDR values from beat spectra.

    :param beat_spectrum_array: array of beat spectra, shape [n, 432, 1]
    :param sdr_array: array of target SDR values
    :param n_epochs: number of training epochs
    :param take: index of this training run (used in the run id)
    :return: the trained tflearn.DNN model
    """
    beat_spec_len = 432
    with tf.Graph().as_default():
        input_layer = input_data(shape=[None, beat_spec_len, 1])
        conv1 = conv_1d(input_layer, 32, 4, activation="relu", regularizer="L2")
        max_pool1 = max_pool_1d(conv1, 2)
        conv2 = conv_1d(max_pool1, 64, 80, activation="relu", regularizer="L2")
        max_pool2 = max_pool_1d(conv2, 2)
        fully1 = fully_connected(max_pool2, 128, activation="relu")
        dropout1 = dropout(fully1, 0.8)
        fully2 = fully_connected(dropout1, 256, activation="relu")
        dropout2 = dropout(fully2, 0.8)
        linear = fully_connected(dropout2, 1, activation="linear")
        regress = tflearn.regression(linear, optimizer="rmsprop",
                                     loss="mean_square", learning_rate=0.001)

        # Training
        model = tflearn.DNN(regress)  # , session=sess)
        model.fit(
            beat_spectrum_array,
            sdr_array,
            n_epoch=n_epochs,
            snapshot_step=1000,
            show_metric=True,
            run_id="repet_choice_{0}_epochs_take_{1}".format(n_epochs, take),
        )
        return model
def createModel(nbClasses, imageSize):
    print("[+] Creating model...")
    convnet = input_data(shape=[None, imageSize, imageSize, 1], name='input')

    convnet = conv_2d(convnet, 64, 2, activation='elu', weights_init="Xavier")
    convnet = max_pool_2d(convnet, 2)

    convnet = conv_2d(convnet, 128, 2, activation='elu', weights_init="Xavier")
    convnet = max_pool_2d(convnet, 2)

    convnet = conv_2d(convnet, 256, 2, activation='elu', weights_init="Xavier")
    convnet = max_pool_2d(convnet, 2)

    convnet = conv_2d(convnet, 512, 2, activation='elu', weights_init="Xavier")
    convnet = max_pool_2d(convnet, 2)

    convnet = fully_connected(convnet, 1024, activation='elu')
    convnet = dropout(convnet, 0.5)

    convnet = fully_connected(convnet, nbClasses, activation='softmax')
    convnet = regression(convnet, optimizer='rmsprop',
                         loss='categorical_crossentropy')

    model = tflearn.DNN(convnet)
    print("    Model created! ✅")
    return model
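# A minimal usage sketch for createModel(). The class count, image size, and
# the commented fit/save calls are illustrative assumptions; trainX/trainY are
# hypothetical arrays of shape [n, 64, 64, 1] and [n, 4].
model = createModel(nbClasses=4, imageSize=64)
# model.fit(trainX, trainY, n_epoch=10, validation_set=0.1, show_metric=True)
# model.save('my_model.tflearn')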
def alexnet():
    X, Y = oxflower17.load_data(one_hot=True, resize_pics=(227, 227))

    # Building 'AlexNet'
    network = input_data(shape=[None, 227, 227, 3])
    network = conv_2d(network, 96, 11, strides=4, activation='relu')
    network = max_pool_2d(network, 3, strides=2)
    network = local_response_normalization(network)
    network = conv_2d(network, 256, 5, activation='relu')
    network = max_pool_2d(network, 3, strides=2)
    network = local_response_normalization(network)
    network = conv_2d(network, 384, 3, activation='relu')
    network = conv_2d(network, 384, 3, activation='relu')
    network = conv_2d(network, 256, 3, activation='relu')
    network = max_pool_2d(network, 3, strides=2)
    network = local_response_normalization(network)
    network = fully_connected(network, 4096, activation='tanh')
    network = dropout(network, 0.5)
    network = fully_connected(network, 4096, activation='tanh')
    network = dropout(network, 0.5)
    network = fully_connected(network, 17, activation='softmax')
    network = regression(network, optimizer='momentum',
                         loss='categorical_crossentropy',
                         learning_rate=0.001)

    # Training
    model = tflearn.DNN(network, checkpoint_path='model_alexnet',
                        max_checkpoints=1, tensorboard_verbose=2)
    model.fit(X, Y, n_epoch=1000, validation_set=0.1, shuffle=True,
              show_metric=True, batch_size=64, snapshot_step=200,
              snapshot_epoch=False, run_id='alexnet')
def cnn():
    X, Y, testX, testY = mnist.load_data(one_hot=True)
    X = X.reshape([-1, 28, 28, 1])
    testX = testX.reshape([-1, 28, 28, 1])

    # Building convolutional network
    network = input_data(shape=[None, 28, 28, 1], name='input')
    network = conv_2d(network, 32, 3, activation='relu', regularizer="L2")
    network = max_pool_2d(network, 2)
    network = local_response_normalization(network)
    network = conv_2d(network, 64, 3, activation='relu', regularizer="L2")
    network = max_pool_2d(network, 2)
    network = local_response_normalization(network)
    network = fully_connected(network, 128, activation='tanh')
    network = dropout(network, 0.8)
    network = fully_connected(network, 256, activation='tanh')
    network = dropout(network, 0.8)
    network = fully_connected(network, 10, activation='softmax')
    network = regression(network, optimizer='adam', learning_rate=0.01,
                         loss='categorical_crossentropy', name='target')

    # Training
    model = tflearn.DNN(network, tensorboard_verbose=0)
    model.fit({'input': X}, {'target': Y}, n_epoch=20,
              validation_set=({'input': testX}, {'target': testY}),
              snapshot_step=100, show_metric=True, run_id='cnn_demo')
def _model1():
    global yTest, img_aug
    tf.reset_default_graph()
    img_prep = ImagePreprocessing()
    img_prep.add_featurewise_zero_center()
    img_prep.add_featurewise_stdnorm()
    network = input_data(shape=[None, inputSize, inputSize, dim],
                         name='input',
                         data_preprocessing=img_prep,
                         data_augmentation=img_aug)
    network = conv_2d(network, 32, 3, strides=4, activation='relu')
    network = max_pool_2d(network, 2, strides=2)
    network = local_response_normalization(network)
    network = conv_2d(network, 64, 3, strides=2, activation='relu')
    network = max_pool_2d(network, 2, strides=2)
    network = local_response_normalization(network)
    network = fully_connected(network, 128, activation='tanh')
    network = dropout(network, 0.8)
    network = fully_connected(network, 256, activation='tanh')
    network = dropout(network, 0.8)
    network = fully_connected(network, len(Y[0]), activation='softmax')
    network = regression(network, optimizer='adam', learning_rate=0.001,
                         loss='categorical_crossentropy', name='target')

    model = tflearn.DNN(network, tensorboard_verbose=3)
    model.fit(X, Y, n_epoch=epochNum, validation_set=(xTest, yTest),
              snapshot_step=500, show_metric=True, batch_size=batchNum,
              shuffle=True, run_id=_id + 'artClassification')
    if modelStore:
        model.save(_id + '-model.tflearn')
def do_cnn_doc2vec_2d(trainX, testX, trainY, testY):
    print("CNN and doc2vec 2d")
    trainX = trainX.reshape([-1, max_features, max_document_length, 1])
    testX = testX.reshape([-1, max_features, max_document_length, 1])

    # Building convolutional network
    network = input_data(shape=[None, max_features, max_document_length, 1],
                         name='input')
    network = conv_2d(network, 16, 3, activation='relu', regularizer="L2")
    network = max_pool_2d(network, 2)
    network = local_response_normalization(network)
    network = conv_2d(network, 32, 3, activation='relu', regularizer="L2")
    network = max_pool_2d(network, 2)
    network = local_response_normalization(network)
    network = fully_connected(network, 128, activation='tanh')
    network = dropout(network, 0.8)
    network = fully_connected(network, 256, activation='tanh')
    network = dropout(network, 0.8)
    network = fully_connected(network, 10, activation='softmax')
    network = regression(network, optimizer='adam', learning_rate=0.01,
                         loss='categorical_crossentropy', name='target')

    # Training
    model = tflearn.DNN(network, tensorboard_verbose=0)
    model.fit({'input': trainX}, {'target': trainY}, n_epoch=20,
              validation_set=({'input': testX}, {'target': testY}),
              snapshot_step=100, show_metric=True, run_id='review')
def main():
    """
    :return:
    """
    # pickle parameters
    fg_or_bg = 'background'
    sdr_type = 'sdr'
    feature = 'sim_mat'
    beat_spec_len = 432

    # set up training, testing, & validation partitions
    sim_mat_array, sdr_array = get_generated_data(feature, fg_or_bg, sdr_type)

    # training params
    n_classes = 10
    n_training_steps = 1000
    training_step_size = 100
    training_percent = 0.85
    testing_percent = 0.15
    validation_percent = 0.00

    sdr_array_1h, hist = sdrs_to_one_hots(sdr_array, n_classes, True)

    train, test, validate = split_into_sets(len(sim_mat_array),
                                            training_percent, testing_percent,
                                            validation_percent)

    # Building convolutional network
    network = input_data(shape=[None, beat_spec_len, 1], name='input')
    network = conv_1d(network, 32, 3, activation='relu', regularizer="L2")
    network = max_pool_1d(network, 2)
    # network = local_response_normalization(network)
    # network = batch_normalization(network)
    # network = conv_1d(network, 64, 3, activation='relu', regularizer="L2")
    # network = max_pool_1d(network, 2)
    # network = local_response_normalization(network)
    # network = batch_normalization(network)
    # network = fully_connected(network, 128, activation='tanh')
    # network = dropout(network, 0.5)
    network = fully_connected(network, 512, activation='tanh')
    network = dropout(network, 0.5)
    network = fully_connected(network, n_classes, activation='softmax')
    # network = fully_connected(network, 1, activation='linear')
    network = regression(network, optimizer='adagrad', learning_rate=0.01,
                         loss='categorical_crossentropy', name='target')

    X = np.expand_dims(sim_mat_array, -1)
    Y = np.array(sdr_array_1h)
    # X = np.expand_dims([beat_spec_array[i] for i in train], -1)
    # Y = np.array([sdr_array_1h[i] for i in train])
    # testX = np.expand_dims([beat_spec_array[i] for i in test], -1)
    # testY = np.array([sdr_array[i] for i in test])

    # Training
    model = tflearn.DNN(network, tensorboard_verbose=1)
    model.fit({'input': X}, {'target': Y}, n_epoch=20, validation_set=0.1,
              snapshot_step=1000, show_metric=True,
              run_id='{} classes'.format(n_classes - 1))
def main():
    pickle_folder = '../pickles_rolloff'
    pickle_folders_to_load = [f for f in os.listdir(pickle_folder)
                              if os.path.isdir(join(pickle_folder, f))]
    pickle_folders_to_load = sorted(pickle_folders_to_load)

    # pickle parameters
    fg_or_bg = 'background'
    sdr_type = 'sdr'
    feature = 'sim_mat'
    beat_spec_len = 432

    # training params
    n_classes = 16
    training_percent = 0.85
    testing_percent = 0.15
    validation_percent = 0.00

    # set up training, testing, & validation partitions
    print('Loading sim_mat and sdrs')
    sim_mat_array, sdr_array = get_generated_data(feature, fg_or_bg, sdr_type)
    print('sim_mat and sdrs loaded')

    print('splitting and grooming data')
    train, test, validate = split_into_sets(len(pickle_folders_to_load),
                                            training_percent, testing_percent,
                                            validation_percent)
    trainX = np.expand_dims([sim_mat_array[i] for i in train], -1)
    trainY = np.expand_dims([sdr_array[i] for i in train], -1)
    testX = np.expand_dims([sim_mat_array[i] for i in test], -1)
    testY = np.array([sdr_array[i] for i in test])

    print('setting up CNN')
    # Building convolutional network
    network = input_data(shape=[None, beat_spec_len, beat_spec_len, 1])
    network = conv_2d(network, 32, 10, activation='relu', regularizer="L2")
    network = max_pool_2d(network, 2)
    network = conv_2d(network, 64, 20, activation='relu', regularizer="L2")
    network = max_pool_2d(network, 2)
    network = fully_connected(network, 128, activation='tanh')
    network = dropout(network, 0.8)
    network = fully_connected(network, 256, activation='tanh')
    network = dropout(network, 0.8)
    network = fully_connected(network, 1, activation='linear')
    regress = tflearn.regression(network, optimizer='sgd', loss='mean_square',
                                 learning_rate=0.01)

    print('running CNN')
    # Training
    model = tflearn.DNN(regress, tensorboard_verbose=1)
    model.fit(trainX, trainY, n_epoch=10, snapshot_step=1000, show_metric=True,
              run_id='{} classes'.format(n_classes - 1))

    predicted = np.array(model.predict(testX))[:, 0]
    print('plotting')
    plot(testY, predicted)
def main():
    pickle_folder = '../pickles_rolloff'
    pickle_folders_to_load = [f for f in os.listdir(pickle_folder)
                              if os.path.isdir(join(pickle_folder, f))]
    pickle_folders_to_load = sorted(pickle_folders_to_load)

    # pickle parameters
    fg_or_bg = 'background'
    sdr_type = 'sdr'
    feature = 'beat_spec'
    beat_spec_len = 432

    # training params
    n_classes = 16
    training_percent = 0.85
    testing_percent = 0.15
    validation_percent = 0.00

    # set up training, testing, & validation partitions
    beat_spec_array, sdr_array = load_beat_spec_and_sdrs(pickle_folders_to_load,
                                                         pickle_folder, feature,
                                                         fg_or_bg, sdr_type)
    train, test, validate = split_into_sets(len(pickle_folders_to_load),
                                            training_percent, testing_percent,
                                            validation_percent)
    trainX = np.expand_dims([beat_spec_array[i] for i in train], -1)
    trainY = np.expand_dims([sdr_array[i] for i in train], -1)
    testX = np.expand_dims([beat_spec_array[i] for i in test], -1)
    testY = np.array([sdr_array[i] for i in test])

    # Building convolutional network
    network = input_data(shape=[None, beat_spec_len, 1])
    network = conv_1d(network, 32, 4, activation='relu', regularizer="L2")
    network = max_pool_1d(network, 2)
    network = conv_1d(network, 64, 80, activation='relu', regularizer="L2")
    network = max_pool_1d(network, 2)
    network = fully_connected(network, 128, activation='relu')
    network = dropout(network, 0.8)
    network = fully_connected(network, 256, activation='relu')  # look for non-tanh things???
    network = dropout(network, 0.8)
    network = fully_connected(network, 1, activation='linear')
    regress = tflearn.regression(network, optimizer='sgd', loss='mean_square',
                                 learning_rate=0.01)

    # Training
    model = tflearn.DNN(regress, tensorboard_verbose=1)
    model.fit(trainX, trainY, n_epoch=100, snapshot_step=1000,
              show_metric=True, run_id='relus_100_3')

    predicted = np.array(model.predict(testX))[:, 0]
    # pprint.pprint()
    print("Test MSE: ", np.square(testY - predicted).mean())
    plot(testY, predicted)
def build_model_1_conv(learning_rate, input_shape, nb_classes, base_path, drop):
    network = input_data(shape=input_shape, name='input')
    network = conv_2d(network, 64, [4, 16], activation='relu')
    network = fully_connected(network, 128, activation='relu')
    network = dropout(network, drop)
    network = fully_connected(network, 64, activation='relu')
    network = dropout(network, drop)
    network = fully_connected(network, nb_classes, activation='softmax')
    network = regression(network, optimizer='sgd', learning_rate=learning_rate,
                         loss='categorical_crossentropy', name='target')
    model = tflearn.DNN(network, tensorboard_verbose=3,
                        tensorboard_dir=base_path + "/tflearn_logs/",
                        checkpoint_path=base_path + "/checkpoints/step")
    return model
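# A minimal sketch of calling build_model_1_conv(). All argument values below
# are illustrative assumptions, and trainX/trainY in the commented fit call
# are hypothetical arrays matching input_shape and nb_classes.
model = build_model_1_conv(learning_rate=0.01,
                           input_shape=[None, 40, 100, 1],
                           nb_classes=5,
                           base_path='/tmp/experiment',
                           drop=0.5)
# model.fit({'input': trainX}, {'target': trainY}, n_epoch=10, show_metric=True)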
def main():
    pickle_folder = 'pickles_combined'

    # pickle parameters
    fg_or_bg = 'background'
    sdr_type = 'sdr'
    feature = 'beat_spec'

    # training params
    training_percent = 0.85
    testing_percent = 0.15
    validation_percent = 0.00
    beat_spec_max = 355

    # set up training, testing, & validation partitions
    beat_spec_array, sdr_array = unpickle_beat_spec_and_sdrs(pickle_folder,
                                                             beat_spec_max)
    train, test, validate = split_into_sets(len(beat_spec_array),
                                            training_percent, testing_percent,
                                            validation_percent)
    trainX = np.expand_dims([beat_spec_array[i] for i in train], -1)
    trainY = np.expand_dims([sdr_array[i] for i in train], -1)
    testX = np.expand_dims([beat_spec_array[i] for i in test], -1)
    testY = np.array([sdr_array[i] for i in test])

    # Building convolutional network
    network = input_data(shape=[None, beat_spec_max, 1])
    network = conv_1d(network, 32, 4, activation='relu', regularizer="L2")
    network = max_pool_1d(network, 2)
    network = conv_1d(network, 64, 80, activation='relu', regularizer="L2")
    network = max_pool_1d(network, 2)
    network = fully_connected(network, 128, activation='relu')
    network = dropout(network, 0.8)
    network = fully_connected(network, 256, activation='relu')  # look for non-tanh things???
    network = dropout(network, 0.8)
    network = fully_connected(network, 1, activation='linear')
    regress = tflearn.regression(network, optimizer='sgd', loss='mean_square',
                                 learning_rate=0.01)

    start = time.time()
    # Training
    model = tflearn.DNN(regress, tensorboard_verbose=1)
    model.fit(trainX, trainY, n_epoch=2000, snapshot_step=1000,
              show_metric=True, run_id='mir1k_2000_truncate')
    elapsed = (time.time() - start)

    predicted = np.array(model.predict(testX))[:, 0]
    print("Test MSE: ", np.square(testY - predicted).mean())
    print(elapsed, "seconds")
    plot(testY, predicted)
def generate_network(self):
    """ Return tflearn cnn network.
    """
    print(self.image_size, self.n_epoch, self.batch_size, self.person_ids)
    print(type(self.image_size), type(self.n_epoch),
          type(self.batch_size), type(self.person_ids))
    if not isinstance(self.image_size, list) \
            or not isinstance(self.n_epoch, int) \
            or not isinstance(self.batch_size, int) \
            or not isinstance(self.person_ids, list):
        # if self.image_size is None or self.n_epoch is None or \
        #         self.batch_size is None or self.person_ids is None:
        raise ValueError("Insufficient values to generate network.\n"
                         "Need (n_epoch, int), (batch_size, int),"
                         "(image_size, list), (person_ids, list).")

    # Real-time data preprocessing
    img_prep = ImagePreprocessing()
    img_prep.add_featurewise_zero_center()
    img_prep.add_featurewise_stdnorm()

    # Real-time data augmentation
    img_aug = ImageAugmentation()
    img_aug.add_random_rotation(max_angle=25.)
    img_aug.add_random_flip_leftright()

    # Convolutional network building
    network = input_data(
        shape=[None, self.image_size[0], self.image_size[1], 3],
        data_preprocessing=img_prep,
        data_augmentation=img_aug)
    network = conv_2d(network, self.image_size[0], self.IMAGE_CHANNEL_NUM,
                      activation='relu')
    network = max_pool_2d(network, 2)
    network = conv_2d(network, self.image_size[0] * 2, self.IMAGE_CHANNEL_NUM,
                      activation='relu')
    network = conv_2d(network, self.image_size[0] * 2, self.IMAGE_CHANNEL_NUM,
                      activation='relu')
    network = max_pool_2d(network, 2)
    network = fully_connected(network, self.image_size[0] * 2**4,
                              activation='relu')
    network = dropout(network, 0.5)
    network = fully_connected(network, self.person_num, activation='softmax')
    network = regression(network, optimizer='adam',
                         loss='categorical_crossentropy',
                         learning_rate=0.001)
    return network
def make_core_network(network):
    network = tflearn.reshape(network, [-1, 28, 28, 1], name="reshape")
    network = conv_2d(network, 32, 3, activation='relu', regularizer="L2")
    network = max_pool_2d(network, 2)
    network = local_response_normalization(network)
    network = conv_2d(network, 64, 3, activation='relu', regularizer="L2")
    network = max_pool_2d(network, 2)
    network = local_response_normalization(network)
    network = fully_connected(network, 128, activation='tanh')
    network = dropout(network, 0.8)
    network = fully_connected(network, 256, activation='tanh')
    network = dropout(network, 0.8)
    network = fully_connected(network, 10, activation='softmax')
    return network
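# A minimal sketch of wiring a core network like the one above into a full
# tflearn model. The flat 784-feature input and the regression settings are
# assumptions, not from the original source; the internal reshape handles
# restoring the 28x28x1 image layout.
network = input_data(shape=[None, 784], name='input')
network = make_core_network(network)
network = regression(network, optimizer='adam', learning_rate=0.01,
                     loss='categorical_crossentropy', name='target')
model = tflearn.DNN(network, tensorboard_verbose=0)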
def _model3():
    global yTest, img_aug
    tf.reset_default_graph()
    img_prep = ImagePreprocessing()
    img_prep.add_featurewise_zero_center()
    img_prep.add_featurewise_stdnorm()
    network = input_data(shape=[None, inputSize, inputSize, dim],
                         data_preprocessing=img_prep,
                         data_augmentation=img_aug)
    network = conv_2d(network, 96, 11, strides=4, activation='relu')
    network = max_pool_2d(network, 3, strides=2)
    network = local_response_normalization(network)
    network = conv_2d(network, 256, 5, activation='relu')
    network = max_pool_2d(network, 3, strides=2)
    network = local_response_normalization(network)
    network = conv_2d(network, 384, 3, activation='relu')
    network = conv_2d(network, 384, 3, activation='relu')
    network = conv_2d(network, 256, 3, activation='relu')
    network = max_pool_2d(network, 3, strides=2)
    network = local_response_normalization(network)
    network = fully_connected(network, 4096, activation='tanh')
    network = dropout(network, 0.5)
    network = fully_connected(network, 4096, activation='tanh')
    network = dropout(network, 0.5)
    network = fully_connected(network, len(yTest[0]), activation='softmax')
    network = regression(network, optimizer='momentum',
                         loss='categorical_crossentropy',
                         learning_rate=0.001)
    print('Model has been made!!!?')

    # Training
    model = tflearn.DNN(network, checkpoint_path='model_densenet_cifar10',
                        max_checkpoints=10, tensorboard_verbose=0,
                        clip_gradients=0.)
    model.load(_path)
    pred = model.predict(xTest)

    df = pd.DataFrame(pred)
    df.to_csv(_path + ".csv")

    newList = pred.copy()
    newList = convert2(newList)
    if _CSV:
        makeCSV(newList)
    pred = convert2(pred)
    pred = convert3(pred)
    yTest = convert3(yTest)
    print(metrics.confusion_matrix(yTest, pred))
    print(metrics.classification_report(yTest, pred))
    print('Accuracy', accuracy_score(yTest, pred))
    print()
    if _wrFile:
        writeTest(pred)
def create_single_digit_model():
    input_layer, last_cnn_layer = create_cnn_layers()

    # h = Dense(256, activation='relu')(last_cnn_layer)
    h = fully_connected(last_cnn_layer, 256, activation='relu',
                        weights_init=INIT)
    # h = Dropout(0.5)(h)
    h = dropout(h, 1 - 0.5)  # tflearn takes keep_prob, not drop rate

    # output_layer = Dense(CLASS_COUNT, activation='softmax', name='out')(h)
    output_layer = fully_connected(h, CLASS_COUNT, activation='softmax',
                                   weights_init=INIT)

    network = regression(output_layer, optimizer=OPTIMIZER, learning_rate=0.1,
                         loss='categorical_crossentropy', name='out')
    # model = Model(input_layer, output_layer)
    model = tflearn.DNN(network, tensorboard_verbose=3,
                        tensorboard_dir='./logs/')
    return model
def setup_model(checkpoint_path=None):
    """Sets up a deep belief network for image classification, based on the
    setup described in the reference below.

    :param checkpoint_path: string path describing prefix for model checkpoints
    :returns: Deep Neural Network
    :rtype: tflearn.DNN

    References:
        - Machine Learning is Fun! Part 3: Deep Learning and
          Convolutional Neural Networks

    Links:
        - https://medium.com/@ageitgey/machine-learning-is-fun-part-3-deep-learning-and-convolutional-neural-networks-f40359318721
    """
    # Make sure the data is normalized
    img_prep = ImagePreprocessing()
    img_prep.add_featurewise_zero_center()
    img_prep.add_featurewise_stdnorm()

    # Create extra synthetic training data by flipping, rotating and blurring
    # the images in our data set.
    img_aug = ImageAugmentation()
    img_aug.add_random_flip_leftright()
    img_aug.add_random_rotation(max_angle=25.)
    img_aug.add_random_blur(sigma_max=3.)

    # Input is a 32x32 image with 3 color channels (red, green and blue)
    network = input_data(shape=[None, 32, 32, 3],
                         data_preprocessing=img_prep,
                         data_augmentation=img_aug)
    network = conv_2d(network, 32, 3, activation='relu')
    network = max_pool_2d(network, 2)
    network = conv_2d(network, 64, 3, activation='relu')
    network = conv_2d(network, 64, 3, activation='relu')
    network = max_pool_2d(network, 2)
    network = fully_connected(network, 512, activation='relu')
    network = dropout(network, 0.5)
    network = fully_connected(network, 2, activation='softmax')
    network = regression(network, optimizer='adam',
                         loss='categorical_crossentropy',
                         learning_rate=0.001)
    if checkpoint_path:
        model = tflearn.DNN(network, tensorboard_verbose=3,
                            checkpoint_path=checkpoint_path)
    else:
        model = tflearn.DNN(network, tensorboard_verbose=3)
    return model
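# A minimal usage sketch for setup_model(). The checkpoint prefix and the
# commented fit call are illustrative assumptions; X/Y and X_test/Y_test are
# hypothetical 32x32x3 image batches with two-class one-hot labels.
model = setup_model(checkpoint_path='checkpoints/classifier.tfl.ckpt')
# model.fit(X, Y, n_epoch=100, validation_set=(X_test, Y_test),
#           show_metric=True, batch_size=96, run_id='classifier')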
def main():
    """
    :return:
    """
    pickle_folder = '../NMF/mfcc_pickles'
    pickle_folders_to_load = [f for f in os.listdir(pickle_folder)
                              if os.path.isdir(join(pickle_folder, f))]

    fg_or_bg = 'background'
    sdr_type = 'sdr'
    feature = 'mfcc_clusters'
    beat_spec_len = 432
    n_epochs = 200
    take = 1

    # set up training, testing, & validation partitions
    mfcc_array, sdr_array = load_mfcc_and_sdrs(pickle_folders_to_load,
                                               pickle_folder, feature,
                                               fg_or_bg, sdr_type)
    mfcc_array = np.expand_dims(mfcc_array, -1)
    sdr_array = np.expand_dims(sdr_array, -1)

    # Building convolutional network
    network = input_data(shape=[None, 13, 100, 1])
    network = conv_2d(network, 32, [5, 5], activation='relu', regularizer="L2")
    network = max_pool_2d(network, 2)
    network = conv_2d(network, 64, [5, 5], activation='relu', regularizer="L2")
    network = max_pool_2d(network, 2)
    network = fully_connected(network, 128, activation='relu')
    network = dropout(network, 0.8)
    network = fully_connected(network, 256, activation='relu')
    network = dropout(network, 0.8)
    network = fully_connected(network, 1, activation='linear')
    regress = tflearn.regression(network, optimizer='rmsprop',
                                 loss='mean_square', learning_rate=0.001)

    start = time.time()
    # Training
    model = tflearn.DNN(regress)  # , session=sess)
    model.fit(mfcc_array, sdr_array, n_epoch=n_epochs, snapshot_step=1000,
              show_metric=True,
              run_id='repet_save_{0}_epochs_take_{1}'.format(n_epochs, take))
    elapsed = (time.time() - start)
    # str() needed: concatenating the float directly would raise a TypeError
    print('Finished training after ' + str(elapsed) + ' seconds. Saving...')

    model_output_folder = 'network_outputs/'
    model_output_file = join(model_output_folder,
                             'nmf_save_{0}_epochs_take_{1}'.format(n_epochs,
                                                                   take))
    model.save(model_output_file)
def do_cnn(trainX, trainY, testX, testY):
    global n_words

    # Data preprocessing
    # Sequence padding
    trainX = pad_sequences(trainX, maxlen=MAX_DOCUMENT_LENGTH, value=0.)
    testX = pad_sequences(testX, maxlen=MAX_DOCUMENT_LENGTH, value=0.)
    # Converting labels to binary vectors
    trainY = to_categorical(trainY, nb_classes=2)
    testY = to_categorical(testY, nb_classes=2)

    # Building convolutional network
    network = input_data(shape=[None, MAX_DOCUMENT_LENGTH], name='input')
    network = tflearn.embedding(network, input_dim=n_words + 1, output_dim=128)
    branch1 = conv_1d(network, 128, 3, padding='valid', activation='relu',
                      regularizer="L2")
    branch2 = conv_1d(network, 128, 4, padding='valid', activation='relu',
                      regularizer="L2")
    branch3 = conv_1d(network, 128, 5, padding='valid', activation='relu',
                      regularizer="L2")
    network = merge([branch1, branch2, branch3], mode='concat', axis=1)
    network = tf.expand_dims(network, 2)
    network = global_max_pool(network)
    network = dropout(network, 0.5)
    network = fully_connected(network, 2, activation='softmax')
    network = regression(network, optimizer='adam', learning_rate=0.001,
                         loss='categorical_crossentropy', name='target')

    # Training
    model = tflearn.DNN(network, tensorboard_verbose=0)
    model.fit(trainX, trainY, n_epoch=20, shuffle=True,
              validation_set=(testX, testY), show_metric=True, batch_size=32)
def inference(images, dataset_meta):
    """
    Build a tiny CNN model.

    Parameters
    ----------
    images : tensor
        Images returned from distorted_inputs() or inputs().
    dataset_meta : dict
        Has key 'n_classes'

    Returns
    -------
    logits
    """
    net = tf.reshape(images, [-1,
                              dataset_meta['image_width'],
                              dataset_meta['image_height'],
                              dataset_meta['image_depth']])
    net = tflearn.layers.conv.conv_2d(net, nb_filter=16, filter_size=3,
                                      activation='relu', strides=1,
                                      weight_decay=0.0)
    y_conv = fully_connected(net, dataset_meta['n_classes'],
                             activation='softmax',
                             weights_init='truncated_normal',
                             bias_init='zeros',
                             regularizer=None,
                             weight_decay=0)
    return y_conv
def do_cnn_doc2vec(trainX, testX, trainY, testY):
    global max_features
    print("CNN and doc2vec")

    # trainX = pad_sequences(trainX, maxlen=max_features, value=0.)
    # testX = pad_sequences(testX, maxlen=max_features, value=0.)
    # Converting labels to binary vectors
    trainY = to_categorical(trainY, nb_classes=2)
    testY = to_categorical(testY, nb_classes=2)

    # Building convolutional network
    network = input_data(shape=[None, max_features], name='input')
    network = tflearn.embedding(network, input_dim=1000000, output_dim=128,
                                validate_indices=False)
    branch1 = conv_1d(network, 128, 3, padding='valid', activation='relu',
                      regularizer="L2")
    branch2 = conv_1d(network, 128, 4, padding='valid', activation='relu',
                      regularizer="L2")
    branch3 = conv_1d(network, 128, 5, padding='valid', activation='relu',
                      regularizer="L2")
    network = merge([branch1, branch2, branch3], mode='concat', axis=1)
    network = tf.expand_dims(network, 2)
    network = global_max_pool(network)
    network = dropout(network, 0.8)
    network = fully_connected(network, 2, activation='softmax')
    network = regression(network, optimizer='adam', learning_rate=0.001,
                         loss='categorical_crossentropy', name='target')

    # Training
    model = tflearn.DNN(network, tensorboard_verbose=0)
    model.fit(trainX, trainY, n_epoch=5, shuffle=True,
              validation_set=(testX, testY), show_metric=True,
              batch_size=100, run_id="review")
def get_nn_model(checkpoint_path='nn_motor_model', session=None):
    # Input is a single value (raw motor value)
    network = input_data(shape=[None, 1], name='input')
    # Hidden layer no.1
    network = fully_connected(network, 12, activation='linear')
    # Output layer
    network = fully_connected(network, 1, activation='tanh')
    # Regression
    network = regression(network, loss='mean_square', metric='accuracy',
                         name='target')
    # Verbosity yay nay
    model = tflearn.DNN(network, tensorboard_verbose=3,
                        checkpoint_path=checkpoint_path, session=session)
    return model
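# A minimal sketch of training the motor model above on dummy data. The
# arrays are illustrative assumptions; targets are squashed into [-1, 1] to
# match the tanh output layer.
import numpy as np

model = get_nn_model()
X = np.random.uniform(-1, 1, (100, 1)).astype(np.float32)  # raw motor values
Y = np.tanh(X)                                             # toy targets
model.fit({'input': X}, {'target': Y}, n_epoch=5, show_metric=True)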
def inference(images, dataset_meta):
    """
    Build the CIFAR-10 model.

    Parameters
    ----------
    images : tensor
        Images returned from distorted_inputs() or inputs().
    dataset_meta : dict
        Has key 'n_classes'

    Returns
    -------
    logits
    """
    net = tf.reshape(images, [-1,
                              dataset_meta['image_width'],
                              dataset_meta['image_height'],
                              dataset_meta['image_depth']])
    net = tflearn.layers.core.flatten(net, name='Flatten')
    y_conv = fully_connected(net, dataset_meta['n_classes'],
                             activation='softmax',
                             weights_init='truncated_normal',
                             bias_init='zeros',
                             regularizer=None,
                             weight_decay=0)
    return y_conv
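# A minimal sketch of how an inference() function like the two above plugs
# into a TF1-style graph. The placeholder shape and metadata values are
# assumptions for illustration; the function reshapes the flat batch itself.
import tensorflow as tf

dataset_meta = {'image_width': 32, 'image_height': 32,
                'image_depth': 3, 'n_classes': 10}
images = tf.placeholder(tf.float32, shape=[None, 32 * 32 * 3])
logits = inference(images, dataset_meta)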
def convolutional_neural_network(width=5, height=6):
    """Create the neural network model.

    Args:
        width: Width of the pseudo image
        height: Height of the pseudo image

    Returns:
        convnet: Output

    """
    # Initialize key variables
    conv1_filter_count = 32
    conv2_filter_count = 64
    fc_units = 1024
    image_height = height
    image_width = width
    filter_size = 2
    pooling_kernel_size = 2
    keep_probability = 0.6
    fully_connected_units = 10

    # Create the convolutional network stuff
    convnet = input_data(
        shape=[None, image_width, image_height, 1], name='input')

    convnet = conv_2d(
        convnet, conv1_filter_count, filter_size, activation='relu')
    convnet = max_pool_2d(convnet, pooling_kernel_size)

    convnet = conv_2d(
        convnet, conv2_filter_count, filter_size, activation='relu')
    convnet = max_pool_2d(convnet, pooling_kernel_size)

    convnet = fully_connected(convnet, fc_units, activation='relu')
    convnet = dropout(convnet, keep_probability)

    convnet = fully_connected(
        convnet, fully_connected_units, activation='softmax')
    convnet = regression(
        convnet, optimizer='adam', learning_rate=0.01,
        loss='categorical_crossentropy', name='targets')

    return convnet
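# A minimal sketch of wrapping the builder above in a tflearn.DNN and fitting
# it. The dummy arrays are illustrative assumptions matching the default
# 5x6 pseudo-image shape and the 10 softmax units.
import numpy as np
import tflearn

convnet = convolutional_neural_network(width=5, height=6)
model = tflearn.DNN(convnet, tensorboard_verbose=0)
X = np.random.rand(32, 5, 6, 1).astype(np.float32)
Y = np.eye(10, dtype=np.float32)[np.random.randint(0, 10, 32)]
model.fit({'input': X}, {'targets': Y}, n_epoch=1, show_metric=True)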
def get_cnn_model(checkpoint_path='cnn_servo_model', width=72, height=48,
                  depth=3, session=None):
    # Inputs
    network = input_data(shape=[None, height, width, depth], name='input')

    # Convolution no.1
    # Relu introduces non-linearity into training
    network = conv_2d(network, 8, [5, 3], activation='relu')
    # Convolution no.2
    network = conv_2d(network, 12, [5, 8], activation='relu')
    # Convolution no.3
    network = conv_2d(network, 16, [5, 16], activation='relu')
    # Convolution no.4
    network = conv_2d(network, 24, [3, 20], activation='relu')
    # Convolution no.5
    network = conv_2d(network, 24, [3, 24], activation='relu')

    # Fully connected no.1
    network = fully_connected(network, 256, activation='relu')
    network = dropout(network, 0.8)
    # Fully connected no.2
    network = fully_connected(network, 100, activation='relu')
    network = dropout(network, 0.8)
    # Fully connected no.3
    network = fully_connected(network, 50, activation='relu')
    network = dropout(network, 0.8)
    # Fully connected no.4
    network = fully_connected(network, 10, activation='relu')
    network = dropout(network, 0.8)
    # Fully connected no.5
    network = fully_connected(network, 1, activation='tanh')

    # Regression
    network = regression(network, loss='mean_square', metric='accuracy',
                         learning_rate=1e-4, name='target')

    # Verbosity yay nay (0 = nothing)
    model = tflearn.DNN(network, tensorboard_verbose=2,
                        checkpoint_path=checkpoint_path, session=session)
    return model
def do_cnn_word2vec_2d(trainX, testX, trainY, testY):
    global max_features
    global max_document_length
    print("CNN and word2vec 2d")
    y_test = testY

    # trainX = pad_sequences(trainX, maxlen=max_features, value=0.)
    # testX = pad_sequences(testX, maxlen=max_features, value=0.)
    # Converting labels to binary vectors
    trainY = to_categorical(trainY, nb_classes=2)
    testY = to_categorical(testY, nb_classes=2)

    # Building convolutional network
    network = input_data(shape=[None, max_document_length, max_features, 1],
                         name='input')
    network = conv_2d(network, 32, 3, activation='relu', regularizer="L2")
    network = max_pool_2d(network, 2)
    network = local_response_normalization(network)
    network = conv_2d(network, 64, 3, activation='relu', regularizer="L2")
    network = max_pool_2d(network, 2)
    network = local_response_normalization(network)
    network = fully_connected(network, 128, activation='tanh')
    network = dropout(network, 0.8)
    network = fully_connected(network, 256, activation='tanh')
    network = dropout(network, 0.8)
    network = fully_connected(network, 2, activation='softmax')
    network = regression(network, optimizer='adam', learning_rate=0.01,
                         loss='categorical_crossentropy', name='target')

    model = tflearn.DNN(network, tensorboard_verbose=0)
    model.fit(trainX, trainY, n_epoch=5, shuffle=True,
              validation_set=(testX, testY), show_metric=True, run_id="sms")

    y_predict_list = model.predict(testX)
    print(y_predict_list)

    y_predict = []
    for i in y_predict_list:
        print(i[0])
        if i[0] > 0.5:
            y_predict.append(0)
        else:
            y_predict.append(1)

    print(classification_report(y_test, y_predict))
    print(metrics.confusion_matrix(y_test, y_predict))
def main():
    pickle_folder = '../pickles_rolloff'
    pickle_folders_to_load = [f for f in os.listdir(pickle_folder)
                              if os.path.isdir(join(pickle_folder, f))]
    pickle_folders_to_load = sorted(pickle_folders_to_load)

    # pickle parameters
    fg_or_bg = 'background'
    sdr_type = 'sdr'
    feature = 'beat_spec'
    beat_spec_len = 432

    # training params
    n_classes = 16
    training_percent = 0.85
    testing_percent = 0.15
    validation_percent = 0.00

    # set up training, testing, & validation partitions
    beat_spec_array, sdr_array = load_beat_spec_and_sdrs(pickle_folders_to_load,
                                                         pickle_folder, feature,
                                                         fg_or_bg, sdr_type)
    train, test, validate = split_into_sets(len(pickle_folders_to_load),
                                            training_percent, testing_percent,
                                            validation_percent)
    trainX = np.expand_dims([beat_spec_array[i] for i in train], -1)
    trainY = np.expand_dims([sdr_array[i] for i in train], -1)
    testX = np.expand_dims([beat_spec_array[i] for i in test], -1)
    testY = np.array([sdr_array[i] for i in test])

    # Building convolutional network
    input = input_data(shape=[None, beat_spec_len, 1])
    conv1 = conv_1d(input, 32, 10, activation='relu', regularizer="L2")
    max_pool1 = max_pool_1d(conv1, 2)
    full = fully_connected(max_pool1, 512, activation='tanh')
    # single = tflearn.single_unit(full)
    single = fully_connected(full, 1, activation='linear')
    regress = tflearn.regression(single, optimizer='sgd', loss='mean_square',
                                 learning_rate=0.01)

    # Training
    model = tflearn.DNN(regress, tensorboard_verbose=1)
    model.fit(trainX, trainY, n_epoch=500, snapshot_step=1000,
              show_metric=True, run_id='{} classes'.format(n_classes - 1))

    predicted = np.array(model.predict(testX))[:, 0]
    plot(testY, predicted)
data_dir = "datasets/MNIST" X, Y, X_test, Y_test = mnist.load_data(data_dir=data_dir, one_hot=True) X = X.reshape([-1, 28, 28, 1]) X_test = X_test.reshape([-1, 28, 28, 1]) # Building the network CNN = input_data(shape=[None, 28, 28, 1], name='input') CNN = conv_2d(CNN, 32, 5, activation='relu', regularizer='L2') CNN = max_pool_2d(CNN, 2) CNN = local_response_normalization(CNN) CNN = conv_2d(CNN, 64, 5, activation='relu', regularizer='L2') CNN = max_pool_2d(CNN, 2) CNN = local_response_normalization(CNN) CNN = fully_connected(CNN, 1024, activation=None) CNN = dropout(CNN, 0.5) CNN = fully_connected(CNN, 10, activation='softmax') CNN = regression(CNN, optimizer='adam', learning_rate=0.0001, loss='categorical_crossentropy', name='target') # Training the network # the log dir will be created automaticlly model = tflearn.DNN(CNN, tensorboard_verbose=0, tensorboard_dir='logs/MNIST_tflearn_board/', checkpoint_path='logs/MNIST_tflearn_checkpoints/checkpoint') model.fit({'input': X}, {'target': Y}, n_epoch=3, validation_set=({'input': X_test}, {'target': Y_test}),
            net = bidirectional_rnn(net, BasicLSTMCell(cell_size[i]),
                                    BasicLSTMCell(cell_size[i]))
            net = dropout(net, dropout_ratio)
elif cell_type == "gru":
    for i in range(len(cell_size)):
        if i < len(cell_size) - 1:
            net = bidirectional_rnn(net, GRUCell(cell_size[i]),
                                    GRUCell(cell_size[i]), return_seq=True)
            net = dropout(net, dropout_ratio)
        else:
            net = bidirectional_rnn(net, GRUCell(cell_size[i]),
                                    GRUCell(cell_size[i]))
            net = dropout(net, dropout_ratio)

net = fully_connected(net, len(qualities), activation='softmax')
net = regression(net, optimizer='adam', learning_rate=learning_rate,
                 loss='categorical_crossentropy')

print('Train model')
model = tflearn.DNN(net, tensorboard_verbose=1,
                    tensorboard_dir="logdir/bi_lstm")

print('Predict')
model.fit(X_train, Y_train, validation_set=(X_test, Y_test))
convnet = input_data(shape=[None, 128, 128, 1], name='input')

convnet = conv_2d(convnet, 32, 3, activation='relu')
convnet = max_pool_2d(convnet, kernel_size=2, strides=2)

convnet = conv_2d(convnet, 64, 3, activation='relu')
convnet = max_pool_2d(convnet, kernel_size=2, strides=2)

convnet = conv_2d(convnet, 128, 3, activation='relu')
convnet = max_pool_2d(convnet, kernel_size=2, strides=2)

convnet = conv_2d(convnet, 256, 3, activation='relu')
convnet = max_pool_2d(convnet, kernel_size=2, strides=2)

convnet = fully_connected(convnet, 4096, activation='relu')
convnet = dropout(convnet, 0.8)

convnet = fully_connected(convnet, 2048, activation='relu')
convnet = dropout(convnet, 0.8)

convnet = fully_connected(convnet, 1024, activation='relu')
convnet = dropout(convnet, 0.8)

convnet = fully_connected(convnet, 8, activation='softmax')
convnet = regression(convnet, optimizer='adam', learning_rate=0.00001,
                     loss='categorical_crossentropy', name='targets')
                  net, kernel_size=[2, 2])  # strides default to be the same as kernel_size

# layer2
net = conv_2d(net, nb_filter=64, filter_size=[5, 5], activation="relu",
              padding='same')
net = max_pool_2d(net, kernel_size=[2, 2])

####################################################################
# No need to reshape
####################################################################

# layer3
net = fully_connected(net, n_units=500, activation="relu")
# layer4
net = fully_connected(net, n_units=10, activation="softmax")

# calc optimizer
net = regression(net, optimizer='sgd', loss='categorical_crossentropy',
                 learning_rate=0.01)

# create Deep NN
model = tflearn.DNN(net, tensorboard_verbose=0)
resultado_olhos = tk.DoubleVar()

tf.reset_default_graph()
convnet = input_data(shape=[None, IMG_SIZE, IMG_SIZE, 1], name='input')

convnet = conv_2d(convnet, 20, 20, activation='prelu')
convnet = max_pool_2d(convnet, 10)

convnet = conv_2d(convnet, 25, 10, activation='prelu')
convnet = max_pool_2d(convnet, 5)

convnet = conv_2d(convnet, 30, 5, activation='prelu')
convnet = max_pool_2d(convnet, 5)

convnet = fully_connected(convnet, nInd * 10, activation='prelu')
convnet = dropout(convnet, 0.8)

convnet = fully_connected(convnet, nInd, activation='softmax')
convnet = regression(convnet, optimizer='adam', learning_rate=LR,
                     loss='categorical_crossentropy', batch_size=16,
                     name='targets')

model = tflearn.DNN(convnet, tensorboard_dir='log')

if os.path.exists('{}.meta'.format(NOME_MODELO)):
    model.load(NOME_MODELO)

tf.reset_default_graph()
                activation='relu', regularizer='L2')
pool2 = max_pool_2d(conv2, params['pool_width'], params['pool_stride'])
lrn2 = local_response_normalization(pool2)
conv3 = conv_2d(lrn2, 128, params['conv_filter'], activation='relu',
                regularizer='L2')
pool3 = max_pool_2d(conv3, params['pool_width'], params['pool_stride'])
lrn3 = local_response_normalization(pool3)
flat = flatten(lrn3)
# Feed the flattened tensor forward (the original passed lrn3 and left
# `flat` unused)
fully1 = fully_connected(flat, 384, activation='relu')
drop1 = dropout(fully1, 0.5)
fully2 = fully_connected(drop1, 384 // 2, activation='relu')
drop2 = dropout(fully2, 0.5)
fully3 = fully_connected(drop2, 10, activation='softmax')
network = regression(fully3, optimizer='adam',
                     loss='categorical_crossentropy',
                     learning_rate=0.001, name='Target')

# Train using classifier
model = tflearn.DNN(network, tensorboard_verbose=0, tensorboard_dir='../log/')
model.fit(X,
convnet = conv_2d(convnet, 32, 5, activation='relu')
convnet = max_pool_2d(convnet, 5)

convnet = conv_2d(convnet, 64, 5, activation='relu')
convnet = max_pool_2d(convnet, 5)

convnet = conv_2d(convnet, 128, 5, activation='relu')
convnet = max_pool_2d(convnet, 5)

convnet = conv_2d(convnet, 64, 5, activation='relu')
convnet = max_pool_2d(convnet, 5)

convnet = conv_2d(convnet, 32, 5, activation='relu')
convnet = max_pool_2d(convnet, 5)

convnet = fully_connected(convnet, 1024, activation='relu')
convnet = dropout(convnet, 0.8)

convnet = fully_connected(convnet, 2, activation='softmax')
convnet = regression(convnet, optimizer='adam', learning_rate=LR,
                     loss='categorical_crossentropy', name='targets')

model = tflearn.DNN(convnet, tensorboard_dir='log')

if os.path.exists('{}.meta'.format(MODEL_NAME)):
    model.load(MODEL_NAME)
    print('model loaded!')

train = train_data[:-500]
test = train_data[-500:]

X = np.array([i[0] for i in train]).reshape(-1, IMG_SIZE, IMG_SIZE, 1)
def define_network(self):
    """
    Defines CNN architecture
    :return: CNN model
    """
    # My CNN 1 (type1)
    #
    # # For data normalization
    # img_prep = ImagePreprocessing()
    # img_prep.add_featurewise_zero_center()
    # img_prep.add_featurewise_stdnorm()
    #
    # # For creating extra data (increase dataset): flipped, rotated, blurred, etc. images
    # img_aug = ImageAugmentation()
    # img_aug.add_random_flip_leftright()
    # img_aug.add_random_rotation(max_angle=25.0)
    # img_aug.add_random_blur(sigma_max=3.0)
    #
    # self.network = input_data(shape=[None, IMG_SIZE, IMG_SIZE, 1],
    #                           data_augmentation=img_aug,
    #                           data_preprocessing=img_prep)
    # self.network = conv_2d(self.network, 64, 5, activation='relu')
    # self.network = max_pool_2d(self.network, 3, strides=2)
    # self.network = conv_2d(self.network, 64, 5, activation='relu')
    # self.network = max_pool_2d(self.network, 3, strides=2)
    # self.network = conv_2d(self.network, 128, 4, activation='relu')
    # self.network = dropout(self.network, 0.3)
    # self.network = fully_connected(self.network, 3072, activation='relu')
    # self.network = fully_connected(self.network, len(EMOTIONS), activation='softmax')
    # self.network = regression(self.network, optimizer='adam', loss='categorical_crossentropy')
    # self.model = tflearn.DNN(self.network,
    #                          checkpoint_path=os.path.join(CHECKPOINTS_PATH, 'emotion_recognition'),
    #                          max_checkpoints=1, tensorboard_verbose=0)

    # My CNN 2 (type2)

    # For creating extra data (increase dataset): flipped, rotated, blurred, etc. images
    img_aug = ImageAugmentation()
    img_aug.add_random_flip_leftright()
    img_aug.add_random_rotation(max_angle=25.0)
    img_aug.add_random_blur(sigma_max=3.0)

    self.network = input_data(shape=[None, IMG_SIZE, IMG_SIZE, 1],
                              data_augmentation=img_aug)
    self.network = conv_2d(self.network, 64, 3, activation='relu')
    self.network = batch_normalization(self.network)
    self.network = conv_2d(self.network, 64, 3, activation='relu')
    self.network = batch_normalization(self.network)
    self.network = max_pool_2d(self.network, 2, strides=2)
    self.network = conv_2d(self.network, 128, 3, activation='relu')
    self.network = batch_normalization(self.network)
    self.network = conv_2d(self.network, 128, 3, activation='relu')
    self.network = batch_normalization(self.network)
    self.network = max_pool_2d(self.network, 2, strides=2)
    self.network = dropout(self.network, 0.2)
    self.network = conv_2d(self.network, 256, 3, activation='relu')
    self.network = batch_normalization(self.network)
    self.network = conv_2d(self.network, 256, 3, activation='relu')
    self.network = batch_normalization(self.network)
    self.network = max_pool_2d(self.network, 2, strides=2)
    self.network = dropout(self.network, 0.25)
    self.network = conv_2d(self.network, 512, 3, activation='relu')
    self.network = batch_normalization(self.network)
    self.network = conv_2d(self.network, 512, 3, activation='relu')
    self.network = batch_normalization(self.network)
    self.network = max_pool_2d(self.network, 2, strides=2)
    self.network = dropout(self.network, 0.25)
    self.network = fully_connected(self.network, 1024, activation='relu')
    self.network = batch_normalization(self.network)
    self.network = dropout(self.network, 0.45)
    self.network = fully_connected(self.network, 1024, activation='relu')
    self.network = batch_normalization(self.network)
    self.network = dropout(self.network, 0.45)
    self.network = fully_connected(self.network, len(EMOTIONS),
                                   activation='softmax')
    self.network = regression(self.network, optimizer='adam',
                              loss='categorical_crossentropy')
    self.model = tflearn.DNN(
        self.network,
        checkpoint_path=os.path.join(CHECKPOINTS_PATH, 'emotion_recognition'),
        max_checkpoints=1,
        tensorboard_verbose=0)
    return self.model
        drug_gru_1_candidate_bias.append(v)
    elif "GRU_3/GRU_3/GRUCell/Gates/Linear/Matrix" in v.name:
        drug_gru_2_gate_matrix.append(v)
    elif "GRU_3/GRU_3/GRUCell/Candidate/Linear/Matrix" in v.name:
        drug_gru_2_candidate_matrix.append(v)
    elif "GRU_3/GRU_3/GRUCell/Gates/Linear/Bias" in v.name:
        drug_gru_2_gate_bias.append(v)
    elif "GRU_3/GRU_3/GRUCell/Candidate/Linear/Bias" in v.name:
        drug_gru_2_candidate_bias.append(v)
    elif "Embedding_1" in v.name:
        drug_embd_W.append(v)

merging = merge([prot_reshape_6, drug_reshape_6], mode='concat', axis=1)

fc_1 = fully_connected(merging, 600, activation='leakyrelu',
                       weights_init="xavier", name='fully1')
drop_2 = dropout(fc_1, 0.8)
fc_2 = fully_connected(drop_2, 300, activation='leakyrelu',
                       weights_init="xavier", name='fully2')
drop_3 = dropout(fc_2, 0.8)
linear = fully_connected(drop_3, 1, activation='linear', name='fully3')
reg = regression(linear, optimizer='adam', learning_rate=0.001,
                 loss='mean_square', name='target')
                               strides=1, name='inception_5b_pool')
inception_5b_pool_1_1 = conv_2d(inception_5b_pool, 128, filter_size=1,
                                activation='relu',
                                name='inception_5b_pool_1_1')
inception_5b_output = merge([inception_5b_1_1, inception_5b_3_3,
                             inception_5b_5_5, inception_5b_pool_1_1],
                            axis=3, mode='concat')

pool5_7_7 = avg_pool_2d(inception_5b_output, kernel_size=7, strides=1)
pool5_7_7 = dropout(pool5_7_7, 0.4)
loss = fully_connected(pool5_7_7, 3, activation='softmax')
network = regression(loss, optimizer='momentum',
                     loss='categorical_crossentropy',
                     learning_rate=0.001)
model = tflearn.DNN(network, checkpoint_path='model_googlenet',
                    max_checkpoints=1, tensorboard_verbose=2)
model.fit(X, Y, n_epoch=1000, validation_set=.25, shuffle=True,
          show_metric=True, batch_size=64)
                                  nb_filter=32,
                                  filter_size=3,
                                  activation='relu',
                                  strides=1,
                                  weight_decay=0.0)
net = tflearn.layers.conv.conv_2d(net, nb_filter=32, filter_size=3,
                                  activation='relu', strides=1,
                                  weight_decay=0.0)
net = tflearn.layers.core.flatten(net, name='Flatten')
net = fully_connected(net, 1024,
                      activation='tanh',
                      weights_init='truncated_normal',
                      bias_init='zeros',
                      regularizer=None,
                      weight_decay=0)
y_conv = fully_connected(net, 369,
                         activation='softmax',
                         weights_init='truncated_normal',
                         bias_init='zeros',
                         regularizer=None,
                         weight_decay=0)

total_parameters = 0
for variable in tf.trainable_variables():
    # shape is an array of tf.Dimension
    shape = variable.get_shape()
def make_model(x, y):
    print("X :", x.shape)
    print("Y :", y.shape)

    # Building convolutional network
    network = input_data(shape=[None, x.shape[1], x.shape[2], 1], name='input')
    # 1
    # network = conv_2d(network, 128, activation='sigmoid', regularizer="L2")
    network = fully_connected(network, 128, activation='sigmoid')
    network = dropout(network, 0.8)
    print(network)
    # 2
    # network = conv_2d(network, 128, activation='sigmoid', regularizer="L2")
    network = fully_connected(network, 128, activation='sigmoid')
    network = dropout(network, 0.8)
    print(network)
    # 3
    network = fully_connected(network, 128, activation='sigmoid')
    network = dropout(network, 0.8)
    print(network)
    # 4
    network = fully_connected(network, 128, activation='sigmoid')
    network = dropout(network, 0.8)
    # 5
    network = fully_connected(network, 128, activation='sigmoid')
    network = dropout(network, 0.8)
    # 6
    network = fully_connected(network, 128, activation='sigmoid')
    network = dropout(network, 0.8)
    # 7
    network = fully_connected(network, 128, activation='sigmoid')
    network = dropout(network, 0.8)
    # 8
    network = fully_connected(network, 128, activation='sigmoid')
    network = dropout(network, 0.8)
    # 9
    network = fully_connected(network, 128, activation='sigmoid')
    network = dropout(network, 0.8)
    # 10
    network = fully_connected(network, 128, activation='sigmoid')
    network = dropout(network, 0.8)

    network = fully_connected(network, y.shape[1], activation='sigmoid')
    network = regression(network, optimizer='adam', learning_rate=0.01,
                         loss='categorical_crossentropy', name='target')

    # Training
    model = tflearn.DNN(
        network,
        checkpoint_path="./model/intermediate_models/model.ckpt",
        max_checkpoints=1,
        tensorboard_verbose=0)
    model.fit(
        {'input': x},
        {'target': y},
        n_epoch=100,
        batch_size=10,
        show_metric=True,
        snapshot_epoch=True,
    )
    return model
train, test, _ = imdb.load_data(path='imdb.pkl', n_words=10000,
                                valid_portion=0.1)
trainX, trainY = train
testX, testY = test

# Data preprocessing
# Sequence padding
trainX = pad_sequences(trainX, maxlen=200, value=0.)
testX = pad_sequences(testX, maxlen=200, value=0.)
# Converting labels to binary vectors
trainY = to_categorical(trainY, nb_classes=2)
testY = to_categorical(testY, nb_classes=2)

# Network building
net = input_data(shape=[None, 200])
net = embedding(net, input_dim=20000, output_dim=128)
net = bidirectional_rnn(net, BasicLSTMCell(128), BasicLSTMCell(128))
net = dropout(net, 0.5)
net = fully_connected(net, 2, activation='softmax')
net = regression(net, optimizer='adam', loss='categorical_crossentropy')

# Training
model = tflearn.DNN(net, clip_gradients=0., tensorboard_verbose=2)
model.fit(trainX, trainY, n_epoch=1, validation_set=0.1, show_metric=True,
          batch_size=64)
def foo(img_fn, model_fn='../data/model/model_weights'):
    img = cv2.imread(img_fn, cv2.IMREAD_GRAYSCALE)
    haar_fn = '../data/haarcascade_russian_plate_number.xml'
    haar = cv2.CascadeClassifier(haar_fn)
    detected = haar.detectMultiScale(img)

    # Crop each detected plate region out of the image
    plates = []
    for x, y, w, h in detected:
        obj = img[y:y + h, x:x + w]
        plates.append(obj)

    # Threshold and label connected components (candidate characters)
    chars = plates[0] < filters.threshold_minimum(plates[0])
    labeled_chars, a = ndi.label(chars)
    labeled_chars = (labeled_chars > 1).astype(np.int8)
    c = measure.find_contours(labeled_chars, .1)

    # Compute bounding boxes for each contour; skip very narrow ones
    letters = []
    for i, v in enumerate(c):
        xs, ys = zip(*[i for i in v])
        x = int(min(xs))
        y = int(min(ys))
        w = int(max(xs) - x + 2)
        h = int(max(ys) - y + 2)
        if w < 15:
            continue
        letters.append((y, x, h, w))
    letters = sorted(letters)

    letters_img = [plates[0][x:x + w, y:y + h] for y, x, h, w in letters]
    letters_img = [i for i in letters_img if i[0, 0] > 127]

    # Discard outlier crops that are much larger than the median size
    sizes = [image.size for image in letters_img]
    median = np.median(sizes)
    allowed_size = median + median / 4
    letters_img = [image for image in letters_img if image.size < allowed_size]

    # Resize each crop to 64x64, padding the width with white as needed
    size = 64
    normalized_img = []
    for i in letters_img:
        ratio = i.shape[0] / i.shape[1]
        img1 = transform.resize(i, [size, int(size / ratio)], mode='constant')
        width = img1.shape[1]
        missing = (size - width) // 2
        ones = np.ones([size, missing])
        img2 = np.append(ones, img1, 1)
        img3 = np.append(img2, ones, 1)
        if 2 * missing + width != size:
            one = np.ones([size, 1])
            img4 = np.append(img3, one, 1)
        else:
            img4 = img3
        normalized_img.append(img4 * 255)

    # Build the character-recognition CNN and load pretrained weights
    net_input = input_data(shape=[None, 64, 64, 1])
    conv1 = conv_2d(net_input, nb_filter=4, filter_size=5,
                    strides=[1, 1, 1, 1], activation='relu')
    max_pool1 = max_pool_2d(conv1, kernel_size=2)
    conv2 = conv_2d(max_pool1, nb_filter=8, filter_size=5,
                    strides=[1, 2, 2, 1], activation='relu')
    max_pool2 = max_pool_2d(conv2, kernel_size=2)
    conv3 = conv_2d(max_pool2, nb_filter=12, filter_size=4,
                    strides=[1, 1, 1, 1], activation='relu')
    max_pool3 = max_pool_2d(conv3, kernel_size=2)
    fc1 = fully_connected(max_pool3, n_units=200, activation='relu')
    drop1 = dropout(fc1, keep_prob=.5)
    fc2 = fully_connected(drop1, n_units=36, activation='softmax')
    net = regression(fc2)

    model = DNN(network=net)
    model.load(model_file=model_fn)

    # Classify each normalized character crop
    labels = list('ABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789')
    predicted = []
    for i in normalized_img:
        y = model.predict(i.reshape([1, 64, 64, 1]))
        y_pred = np.argmax(y[0])
        predicted.append(labels[y_pred])
    return ''.join(predicted)
""" Define Convolutional Nerual Network model for MNIST input """ from tflearn import DNN from tflearn.layers.core import input_data, dropout, fully_connected from tflearn.layers.conv import conv_2d, max_pool_2d from tflearn.layers.normalization import local_response_normalization from tflearn.layers.estimator import regression # Building convolutional network network = input_data(shape=[None, 28, 28, 1], name='input') network = conv_2d(network, 32, 3, activation='relu', regularizer="L2") network = max_pool_2d(network, 2) network = local_response_normalization(network) network = conv_2d(network, 64, 3, activation='relu', regularizer="L2") network = max_pool_2d(network, 2) network = local_response_normalization(network) network = fully_connected(network, 128, activation='tanh') network = dropout(network, 0.8) network = fully_connected(network, 256, activation='tanh') network = dropout(network, 0.8) network = fully_connected(network, 10, activation='softmax') network = regression(network, optimizer='adam', learning_rate=0.01, loss='categorical_crossentropy', name='target') # Define model model = DNN(network, tensorboard_verbose=0)
network = conv_2d(network, 64, 5, strides=2, activation='relu',
                  weights_init='truncated_normal',
                  bias_init='truncated_normal')
network = conv_2d(network, 64, 5, strides=2, activation='relu',
                  weights_init='truncated_normal',
                  bias_init='truncated_normal')
network = max_pool_2d(network, 2, strides=2)
network = fully_connected(network, 256, activation='relu')
network = dropout(network, dropout_rate)
network = fully_connected(network, n_classes, activation='softmax')
network = regression(network, optimizer='adam',
                     loss='categorical_crossentropy',
                     learning_rate=learning_rate)

# Training
model = tflearn.DNN(network, checkpoint_path='model.tfl.ckpt',
                    tensorboard_verbose=0, max_checkpoints=1)
model.load("model.tfl")
# In[8]:
# First convolution layer
model = conv_2d(model, 46, 5, activation='relu')
model = max_pool_2d(model, 5)

# Second convolution layer
model = conv_2d(model, 42, 5, activation='relu')
model = max_pool_2d(model, 5)

# Third convolution layer
model = conv_2d(model, 38, 5, activation='relu')
model = max_pool_2d(model, 5)

# Fully connected layer
model = fully_connected(model, 1024, activation='relu')
model = dropout(model, 0.7)
model = fully_connected(model, 2, activation='softmax')

# In[9]:
model = regression(model, optimizer='adam', loss='categorical_crossentropy',
                   learning_rate=0.003, name='output')

# In[10]:
cnnmodel = tflearn.DNN(model)
train_data = create_train_data()

convnet = input_data(shape=[None, IMG_SIZE, IMG_SIZE, 3], name='input')

convnet = conv_2d(convnet, 32, 5, activation='relu')  # Convolution layer 1: 32 filters of size 5x5
convnet = max_pool_2d(convnet, 5)                     # Max pooling with a 5x5 window

convnet = conv_2d(convnet, 64, 5, activation='relu')  # Convolution layer 2: 64 filters of size 5x5
convnet = max_pool_2d(convnet, 5)                     # Max pooling with a 5x5 window

convnet = conv_2d(convnet, 32, 5, activation='relu')  # Convolution layer 3: 32 filters of size 5x5
convnet = max_pool_2d(convnet, 5)                     # Max pooling with a 5x5 window

convnet = fully_connected(convnet, 1024, activation='relu')  # Fully connected layer with 1024 units
convnet = dropout(convnet, 0.4)  # Dropout with keep probability 0.4

convnet = fully_connected(convnet, 2, activation='softmax')
convnet = regression(convnet, optimizer='adam', learning_rate=LR,
                     loss='categorical_crossentropy', name='targets')

model = tflearn.DNN(convnet, tensorboard_dir='log')

##############################################################################
# Modelling the training data
train = train_data
X = np.array([i[0] for i in train])
Y = [i[1] for i in train]
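A hedged continuation of the snippet above: a fit call closes the training loop. The epoch count and run_id are illustrative assumptions, not part of the original.

# Illustrative training step; n_epoch and run_id are assumptions.
X = X.reshape(-1, IMG_SIZE, IMG_SIZE, 3)
model.fit({'input': X}, {'targets': Y}, n_epoch=10,
          show_metric=True, run_id='convnet_train')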
x = x.reshape([-1, 28, 28, 1])
test_x = test_x.reshape([-1, 28, 28, 1])

input_layer = input_data(shape=[None, 28, 28, 1], name='input')
# nb_filter is the number of output channels; filter_size is the 5x5 kernel
conv1 = conv_2d(input_layer, nb_filter=32, filter_size=5, activation='relu')
# kernel_size is the 2x2 pooling window
pool1 = max_pool_2d(conv1, kernel_size=2)
conv2 = conv_2d(pool1, nb_filter=64, filter_size=5, activation='relu')
pool2 = max_pool_2d(conv2, kernel_size=2)
fc = fully_connected(pool2, n_units=1024, activation='relu')
fc = dropout(fc, 0.8)
output = fully_connected(fc, n_units=10, activation='softmax')
network = regression(output, optimizer='adam', learning_rate=0.01,
                     loss='categorical_crossentropy', name='targets')
model = tflearn.DNN(network, tensorboard_dir='log')

if os.path.exists('tflearncnn.model.index') and os.path.exists('tflearncnn.model.meta'):
    model.load('tflearncnn.model')
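A plausible continuation of the checkpointing pattern above: train and then save so the load succeeds on the next run. The label arrays y/test_y, the epoch count, and the run_id are assumptions.

# Hypothetical training/saving step to pair with the load above;
# y and test_y are assumed label arrays matching x and test_x.
model.fit({'input': x}, {'targets': y}, n_epoch=5,
          validation_set=({'input': test_x}, {'targets': test_y}),
          show_metric=True, run_id='tflearncnn')
model.save('tflearncnn.model')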
import tflearn
from tflearn.layers.core import input_data, dropout, fully_connected
from tflearn.layers.estimator import regression
from tflearn.data_utils import load_csv

# target_column=-1: labels sit in the last column; column 1 is ignored.
data, target = load_csv('dataset.csv', target_column=-1, columns_to_ignore=[1],
                        has_header=True, categorical_labels=True, n_classes=2)

network = input_data(shape=[None, 23], name='input')
network = fully_connected(network, 10, activation='relu', name='nn_layer_1')
network = fully_connected(network, 5, activation='relu', name='nn_layer_2')
network = fully_connected(network, 2, activation='softmax', name='output_layer')
network = regression(network, optimizer='adam',
                     loss='categorical_crossentropy', learning_rate=0.001)

model = tflearn.DNN(network, tensorboard_verbose=3)
model.fit(data, target, n_epoch=50, validation_set=0.3,
          show_metric=True, run_id='model1')
# model.save('/model1')
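A small inference sketch for the trained classifier above; the feature values are placeholders for one 23-column row from dataset.csv.

# Hypothetical single-row prediction (23 placeholder feature values).
sample = [0] * 23
pred = model.predict([sample])
print('Class probabilities:', pred[0])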
def analysis():
    import cv2
    import numpy as np
    import os
    from random import shuffle
    from tqdm import tqdm

    verify_dir = 'testpicture'
    IMG_SIZE = 50
    LR = 1e-3
    MODEL_NAME = 'healthyvsunhealthy-{}-{}.model'.format(LR, '2conv-basic')

    def process_verify_data():
        verifying_data = []
        for img in tqdm(os.listdir(verify_dir)):
            path = os.path.join(verify_dir, img)
            img_num = img.split('.')[0]
            img = cv2.imread(path, cv2.IMREAD_COLOR)
            img = cv2.resize(img, (IMG_SIZE, IMG_SIZE))
            verifying_data.append([np.array(img), img_num])
        np.save('verify_data.npy', verifying_data)
        return verifying_data

    verify_data = process_verify_data()

    import tflearn
    from tflearn.layers.conv import conv_2d, max_pool_2d
    from tflearn.layers.core import input_data, dropout, fully_connected
    from tflearn.layers.estimator import regression
    import tensorflow as tf
    tf.reset_default_graph()

    convnet = input_data(shape=[None, IMG_SIZE, IMG_SIZE, 3], name='input')
    convnet = conv_2d(convnet, 32, 3, activation='relu')
    convnet = max_pool_2d(convnet, 3)
    convnet = conv_2d(convnet, 64, 3, activation='relu')
    convnet = max_pool_2d(convnet, 3)
    convnet = conv_2d(convnet, 128, 3, activation='relu')
    convnet = max_pool_2d(convnet, 3)
    convnet = conv_2d(convnet, 32, 3, activation='relu')
    convnet = max_pool_2d(convnet, 3)
    convnet = conv_2d(convnet, 64, 3, activation='relu')
    convnet = max_pool_2d(convnet, 3)
    convnet = fully_connected(convnet, 1024, activation='relu')
    convnet = dropout(convnet, 0.8)
    convnet = fully_connected(convnet, 4, activation='softmax')
    convnet = regression(convnet, optimizer='adam', learning_rate=LR,
                         loss='categorical_crossentropy', name='targets')

    model = tflearn.DNN(convnet, tensorboard_dir='log')

    if os.path.exists('{}.meta'.format(MODEL_NAME)):
        model.load(MODEL_NAME)
        print('model loaded!')

    import matplotlib.pyplot as plt
    fig = plt.figure()
    for num, data in enumerate(verify_data):
        img_num = data[1]
        img_data = data[0]
        y = fig.add_subplot(3, 4, num + 1)
        orig = img_data
        data = img_data.reshape(IMG_SIZE, IMG_SIZE, 3)
        model_out = model.predict([data])[0]

        if np.argmax(model_out) == 0:
            str_label = 'healthy'
        elif np.argmax(model_out) == 1:
            str_label = 'bacterial'
        elif np.argmax(model_out) == 2:
            str_label = 'viral'
        elif np.argmax(model_out) == 3:
            str_label = 'lateblight'

        status = "HEALTHY" if str_label == 'healthy' else "UNHEALTHY"
        # tk and the remedy callbacks (bact, vir, latebl) come from the
        # surrounding GUI module.
        message = tk.Label(text='Status: ' + status, background="lightgreen",
                           fg="Brown", font=("", 15))
        message.grid(column=0, row=3, padx=10, pady=10)

        if str_label == 'bacterial':
            diseasename = "Bacterial Spot"
            disease = tk.Label(text='Disease Name: ' + diseasename,
                               background="lightgreen", fg="Black", font=("", 15))
            disease.grid(column=0, row=4, padx=10, pady=10)
            r = tk.Label(text='Click below for remedies...',
                         background="lightgreen", fg="Brown", font=("", 15))
            r.grid(column=0, row=5, padx=10, pady=10)
            button3 = tk.Button(text="Remedies", command=bact)
            button3.grid(column=0, row=6, padx=10, pady=10)
        elif str_label == 'viral':
            diseasename = "Yellow leaf curl virus"
            disease = tk.Label(text='Disease Name: ' + diseasename,
                               background="lightgreen", fg="Black", font=("", 15))
            disease.grid(column=0, row=4, padx=10, pady=10)
            r = tk.Label(text='Click below for remedies...',
                         background="lightgreen", fg="Brown", font=("", 15))
            r.grid(column=0, row=5, padx=10, pady=10)
            button3 = tk.Button(text="Remedies", command=vir)
            button3.grid(column=0, row=6, padx=10, pady=10)
        elif str_label == 'lateblight':
            diseasename = "Late Blight"
            disease = tk.Label(text='Disease Name: ' + diseasename,
                               background="lightgreen", fg="Black", font=("", 15))
            disease.grid(column=0, row=4, padx=10, pady=10)
            r = tk.Label(text='Click below for remedies...',
                         background="lightgreen", fg="Brown", font=("", 15))
            r.grid(column=0, row=5, padx=10, pady=10)
            button3 = tk.Button(text="Remedies", command=latebl)
            button3.grid(column=0, row=6, padx=10, pady=10)
        else:
            r = tk.Label(text='Plant is healthy', background="lightgreen",
                         fg="Black", font=("", 15))
            r.grid(column=0, row=4, padx=10, pady=10)

    button = tk.Button(text="Exit", command=exit)
    button.grid(column=0, row=9, padx=20, pady=20)
def create_googlenet(num_classes):
    # Building 'GoogLeNet'
    network = input_data(shape=[None, 227, 227, 3],
                         data_preprocessing=img_prep,
                         data_augmentation=img_aug)
    conv1_7_7 = conv_2d(network, 64, 7, strides=2, activation='relu', name='conv1_7_7_s2')
    pool1_3_3 = max_pool_2d(conv1_7_7, 3, strides=2)
    pool1_3_3 = local_response_normalization(pool1_3_3)
    conv2_3_3_reduce = conv_2d(pool1_3_3, 64, 1, activation='relu', name='conv2_3_3_reduce')
    conv2_3_3 = conv_2d(conv2_3_3_reduce, 192, 3, activation='relu', name='conv2_3_3')
    conv2_3_3 = local_response_normalization(conv2_3_3)
    pool2_3_3 = max_pool_2d(conv2_3_3, kernel_size=3, strides=2, name='pool2_3_3_s2')

    # 3a
    inception_3a_1_1 = conv_2d(pool2_3_3, 64, 1, activation='relu', name='inception_3a_1_1')
    inception_3a_3_3_reduce = conv_2d(pool2_3_3, 96, 1, activation='relu', name='inception_3a_3_3_reduce')
    inception_3a_3_3 = conv_2d(inception_3a_3_3_reduce, 128, filter_size=3, activation='relu', name='inception_3a_3_3')
    inception_3a_5_5_reduce = conv_2d(pool2_3_3, 16, filter_size=1, activation='relu', name='inception_3a_5_5_reduce')
    inception_3a_5_5 = conv_2d(inception_3a_5_5_reduce, 32, filter_size=5, activation='relu', name='inception_3a_5_5')
    inception_3a_pool = max_pool_2d(pool2_3_3, kernel_size=3, strides=1, name='inception_3a_pool')
    inception_3a_pool_1_1 = conv_2d(inception_3a_pool, 32, filter_size=1, activation='relu', name='inception_3a_pool_1_1')
    inception_3a_output = merge([inception_3a_1_1, inception_3a_3_3,
                                 inception_3a_5_5, inception_3a_pool_1_1],
                                mode='concat', axis=3)

    # 3b
    inception_3b_1_1 = conv_2d(inception_3a_output, 128, filter_size=1, activation='relu', name='inception_3b_1_1')
    inception_3b_3_3_reduce = conv_2d(inception_3a_output, 128, filter_size=1, activation='relu', name='inception_3b_3_3_reduce')
    inception_3b_3_3 = conv_2d(inception_3b_3_3_reduce, 192, filter_size=3, activation='relu', name='inception_3b_3_3')
    inception_3b_5_5_reduce = conv_2d(inception_3a_output, 32, filter_size=1, activation='relu', name='inception_3b_5_5_reduce')
    inception_3b_5_5 = conv_2d(inception_3b_5_5_reduce, 96, filter_size=5, activation='relu', name='inception_3b_5_5')
    inception_3b_pool = max_pool_2d(inception_3a_output, kernel_size=3, strides=1, name='inception_3b_pool')
    inception_3b_pool_1_1 = conv_2d(inception_3b_pool, 64, filter_size=1, activation='relu', name='inception_3b_pool_1_1')
    inception_3b_output = merge([inception_3b_1_1, inception_3b_3_3,
                                 inception_3b_5_5, inception_3b_pool_1_1],
                                mode='concat', axis=3, name='inception_3b_output')
    pool3_3_3 = max_pool_2d(inception_3b_output, kernel_size=3, strides=2, name='pool3_3_3')

    # 4a
    inception_4a_1_1 = conv_2d(pool3_3_3, 192, filter_size=1, activation='relu', name='inception_4a_1_1')
    inception_4a_3_3_reduce = conv_2d(pool3_3_3, 96, filter_size=1, activation='relu', name='inception_4a_3_3_reduce')
    inception_4a_3_3 = conv_2d(inception_4a_3_3_reduce, 208, filter_size=3, activation='relu', name='inception_4a_3_3')
    inception_4a_5_5_reduce = conv_2d(pool3_3_3, 16, filter_size=1, activation='relu', name='inception_4a_5_5_reduce')
    inception_4a_5_5 = conv_2d(inception_4a_5_5_reduce, 48, filter_size=5, activation='relu', name='inception_4a_5_5')
    inception_4a_pool = max_pool_2d(pool3_3_3, kernel_size=3, strides=1, name='inception_4a_pool')
    inception_4a_pool_1_1 = conv_2d(inception_4a_pool, 64, filter_size=1, activation='relu', name='inception_4a_pool_1_1')
    inception_4a_output = merge([inception_4a_1_1, inception_4a_3_3,
                                 inception_4a_5_5, inception_4a_pool_1_1],
                                mode='concat', axis=3, name='inception_4a_output')

    # 4b
    inception_4b_1_1 = conv_2d(inception_4a_output, 160, filter_size=1, activation='relu', name='inception_4b_1_1')
    inception_4b_3_3_reduce = conv_2d(inception_4a_output, 112, filter_size=1, activation='relu', name='inception_4b_3_3_reduce')
    inception_4b_3_3 = conv_2d(inception_4b_3_3_reduce, 224, filter_size=3, activation='relu', name='inception_4b_3_3')
    inception_4b_5_5_reduce = conv_2d(inception_4a_output, 24, filter_size=1, activation='relu', name='inception_4b_5_5_reduce')
    inception_4b_5_5 = conv_2d(inception_4b_5_5_reduce, 64, filter_size=5, activation='relu', name='inception_4b_5_5')
    inception_4b_pool = max_pool_2d(inception_4a_output, kernel_size=3, strides=1, name='inception_4b_pool')
    inception_4b_pool_1_1 = conv_2d(inception_4b_pool, 64, filter_size=1, activation='relu', name='inception_4b_pool_1_1')
    inception_4b_output = merge([inception_4b_1_1, inception_4b_3_3,
                                 inception_4b_5_5, inception_4b_pool_1_1],
                                mode='concat', axis=3, name='inception_4b_output')

    # 4c
    inception_4c_1_1 = conv_2d(inception_4b_output, 128, filter_size=1, activation='relu', name='inception_4c_1_1')
    inception_4c_3_3_reduce = conv_2d(inception_4b_output, 128, filter_size=1, activation='relu', name='inception_4c_3_3_reduce')
    inception_4c_3_3 = conv_2d(inception_4c_3_3_reduce, 256, filter_size=3, activation='relu', name='inception_4c_3_3')
    inception_4c_5_5_reduce = conv_2d(inception_4b_output, 24, filter_size=1, activation='relu', name='inception_4c_5_5_reduce')
    inception_4c_5_5 = conv_2d(inception_4c_5_5_reduce, 64, filter_size=5, activation='relu', name='inception_4c_5_5')
    inception_4c_pool = max_pool_2d(inception_4b_output, kernel_size=3, strides=1)
    inception_4c_pool_1_1 = conv_2d(inception_4c_pool, 64, filter_size=1, activation='relu', name='inception_4c_pool_1_1')
    inception_4c_output = merge([inception_4c_1_1, inception_4c_3_3,
                                 inception_4c_5_5, inception_4c_pool_1_1],
                                mode='concat', axis=3, name='inception_4c_output')

    # 4d
    inception_4d_1_1 = conv_2d(inception_4c_output, 112, filter_size=1, activation='relu', name='inception_4d_1_1')
    inception_4d_3_3_reduce = conv_2d(inception_4c_output, 144, filter_size=1, activation='relu', name='inception_4d_3_3_reduce')
    inception_4d_3_3 = conv_2d(inception_4d_3_3_reduce, 288, filter_size=3, activation='relu', name='inception_4d_3_3')
    inception_4d_5_5_reduce = conv_2d(inception_4c_output, 32, filter_size=1, activation='relu', name='inception_4d_5_5_reduce')
    inception_4d_5_5 = conv_2d(inception_4d_5_5_reduce, 64, filter_size=5, activation='relu', name='inception_4d_5_5')
    inception_4d_pool = max_pool_2d(inception_4c_output, kernel_size=3, strides=1, name='inception_4d_pool')
    inception_4d_pool_1_1 = conv_2d(inception_4d_pool, 64, filter_size=1, activation='relu', name='inception_4d_pool_1_1')
    inception_4d_output = merge([inception_4d_1_1, inception_4d_3_3,
                                 inception_4d_5_5, inception_4d_pool_1_1],
                                mode='concat', axis=3, name='inception_4d_output')

    # 4e
    inception_4e_1_1 = conv_2d(inception_4d_output, 256, filter_size=1, activation='relu', name='inception_4e_1_1')
    inception_4e_3_3_reduce = conv_2d(inception_4d_output, 160, filter_size=1, activation='relu', name='inception_4e_3_3_reduce')
    inception_4e_3_3 = conv_2d(inception_4e_3_3_reduce, 320, filter_size=3, activation='relu', name='inception_4e_3_3')
    inception_4e_5_5_reduce = conv_2d(inception_4d_output, 32, filter_size=1, activation='relu', name='inception_4e_5_5_reduce')
    inception_4e_5_5 = conv_2d(inception_4e_5_5_reduce, 128, filter_size=5, activation='relu', name='inception_4e_5_5')
    inception_4e_pool = max_pool_2d(inception_4d_output, kernel_size=3, strides=1, name='inception_4e_pool')
    inception_4e_pool_1_1 = conv_2d(inception_4e_pool, 128, filter_size=1, activation='relu', name='inception_4e_pool_1_1')
    inception_4e_output = merge([inception_4e_1_1, inception_4e_3_3,
                                 inception_4e_5_5, inception_4e_pool_1_1],
                                axis=3, mode='concat')
    pool4_3_3 = max_pool_2d(inception_4e_output, kernel_size=3, strides=2, name='pool_3_3')

    # 5a
    inception_5a_1_1 = conv_2d(pool4_3_3, 256, filter_size=1, activation='relu', name='inception_5a_1_1')
    inception_5a_3_3_reduce = conv_2d(pool4_3_3, 160, filter_size=1, activation='relu', name='inception_5a_3_3_reduce')
    inception_5a_3_3 = conv_2d(inception_5a_3_3_reduce, 320, filter_size=3, activation='relu', name='inception_5a_3_3')
    inception_5a_5_5_reduce = conv_2d(pool4_3_3, 32, filter_size=1, activation='relu', name='inception_5a_5_5_reduce')
    inception_5a_5_5 = conv_2d(inception_5a_5_5_reduce, 128, filter_size=5, activation='relu', name='inception_5a_5_5')
    inception_5a_pool = max_pool_2d(pool4_3_3, kernel_size=3, strides=1, name='inception_5a_pool')
    inception_5a_pool_1_1 = conv_2d(inception_5a_pool, 128, filter_size=1, activation='relu', name='inception_5a_pool_1_1')
    inception_5a_output = merge([inception_5a_1_1, inception_5a_3_3,
                                 inception_5a_5_5, inception_5a_pool_1_1],
                                axis=3, mode='concat')

    # 5b
    inception_5b_1_1 = conv_2d(inception_5a_output, 384, filter_size=1, activation='relu', name='inception_5b_1_1')
    inception_5b_3_3_reduce = conv_2d(inception_5a_output, 192, filter_size=1, activation='relu', name='inception_5b_3_3_reduce')
    inception_5b_3_3 = conv_2d(inception_5b_3_3_reduce, 384, filter_size=3, activation='relu', name='inception_5b_3_3')
    inception_5b_5_5_reduce = conv_2d(inception_5a_output, 48, filter_size=1, activation='relu', name='inception_5b_5_5_reduce')
    inception_5b_5_5 = conv_2d(inception_5b_5_5_reduce, 128, filter_size=5, activation='relu', name='inception_5b_5_5')
    inception_5b_pool = max_pool_2d(inception_5a_output, kernel_size=3, strides=1, name='inception_5b_pool')
    inception_5b_pool_1_1 = conv_2d(inception_5b_pool, 128, filter_size=1, activation='relu', name='inception_5b_pool_1_1')
    inception_5b_output = merge([inception_5b_1_1, inception_5b_3_3,
                                 inception_5b_5_5, inception_5b_pool_1_1],
                                axis=3, mode='concat')
    pool5_7_7 = avg_pool_2d(inception_5b_output, kernel_size=7, strides=1)
    pool5_7_7 = dropout(pool5_7_7, 0.4)

    # fc
    fc_out = fully_connected(pool5_7_7, num_classes, activation='softmax')
    network = regression(fc_out, optimizer='momentum',
                         loss='categorical_crossentropy', learning_rate=0.01)
    return network
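A hedged usage sketch: create_googlenet only returns the regression graph, so a caller still wraps it in tflearn.DNN and fits it. img_prep and img_aug must already exist at module scope; the class count, dataset arrays X/Y, and hyperparameters below are placeholders.

# Hypothetical training wrapper; X, Y are placeholder arrays of
# 227x227x3 images and one-hot labels with num_classes columns.
network = create_googlenet(num_classes=17)
model = tflearn.DNN(network, checkpoint_path='model_googlenet',
                    max_checkpoints=1, tensorboard_verbose=0)
model.fit(X, Y, n_epoch=100, validation_set=0.1, shuffle=True,
          show_metric=True, batch_size=64, run_id='googlenet')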
def analysis(filepath):
    verify_data = process_verify_data(filepath)

    str_label = "Cannot make a prediction."
    status = "Error"

    tf.reset_default_graph()

    convnet = input_data(shape=[None, IMG_SIZE, IMG_SIZE, 3], name='input')
    '''
    # relu:
    Used in the hidden layers of the network. It is essentially the
    function max(0, x): negative activations are clamped to zero.
    # softmax:
    Used for the output layer in multi-class classification. It
    exponentiates and normalizes the logits (e^x_i / sum_j e^x_j),
    producing a vector of class probabilities.
    '''
    convnet = conv_2d(convnet, 32, 3, activation='relu')
    convnet = max_pool_2d(convnet, 3)
    convnet = conv_2d(convnet, 64, 3, activation='relu')
    convnet = max_pool_2d(convnet, 3)
    convnet = conv_2d(convnet, 128, 3, activation='relu')
    convnet = max_pool_2d(convnet, 3)
    convnet = conv_2d(convnet, 32, 3, activation='relu')
    convnet = max_pool_2d(convnet, 3)
    convnet = conv_2d(convnet, 64, 3, activation='relu')
    convnet = max_pool_2d(convnet, 3)
    convnet = fully_connected(convnet, 1024, activation='relu')
    convnet = dropout(convnet, 0.8)
    convnet = fully_connected(convnet, 4, activation='softmax')
    convnet = regression(convnet, optimizer='adam', learning_rate=LR,
                         loss='categorical_crossentropy', name='targets')

    model = tflearn.DNN(convnet, tensorboard_dir='log')

    if os.path.exists('{}.meta'.format(MODEL_NAME)):
        model.load(MODEL_NAME)
        print('Model loaded successfully.')
    else:
        print('Error: Create a model using neural_network.py first.')

    img_data, img_name = verify_data[0], verify_data[1]

    data = img_data.reshape(IMG_SIZE, IMG_SIZE, 3)
    model_out = model.predict([data])[0]

    if np.argmax(model_out) == 0:
        str_label = 'Healthy'
    elif np.argmax(model_out) == 1:
        str_label = 'Bacterial'
    elif np.argmax(model_out) == 2:
        str_label = 'Viral'
    elif np.argmax(model_out) == 3:
        str_label = 'Lateblight'

    status = 'Healthy' if str_label == 'Healthy' else 'Unhealthy'

    result = 'Status: ' + status + '.'
    if str_label != 'Healthy':
        result += '\nDisease: ' + str_label + '.'
    return result
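analysis() depends on a process_verify_data helper that is not shown at this point. A minimal sketch consistent with how the result is unpacked above (image array first, name second); the resize size and color flag are assumptions carried over from the similar loader earlier in the document.

import cv2
import numpy as np

def process_verify_data(filepath):
    # Hypothetical helper: load one image and return [array, name],
    # matching the verify_data[0] / verify_data[1] unpacking above.
    img_name = filepath.split('.')[0]
    img = cv2.imread(filepath, cv2.IMREAD_COLOR)
    img = cv2.resize(img, (IMG_SIZE, IMG_SIZE))
    return [np.array(img), img_name]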
print(type(feature_vector_train), feature_vector_train.shape)
print(type(labels_train), labels_train.shape)

num_features = 128   # output of the resnet
classes = num_faces  # number of faces to be recognized
n1 = 68              # number of nodes in hidden layer 1
n2 = 68              # number of nodes in hidden layer 2
n3 = 32              # number of nodes in hidden layer 3
n4 = 16              # number of nodes in hidden layer 4
n5 = 16              # number of nodes in hidden layer 5
learning_rate = 0.03
num_epochs = 100

output = input_data(shape=[None, num_features, 1, 1], name="input")
output = fully_connected(output, n1, activation="relu")
# output = fully_connected(output, n2, activation="relu")
# output = fully_connected(output, n3, activation="relu")
# output = fully_connected(output, n4, activation="relu")
# output = fully_connected(output, n5, activation="relu")
output = fully_connected(output, classes, activation="softmax")
output = regression(output, optimizer="adam", learning_rate=learning_rate,
                    loss="categorical_crossentropy")
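The snippet stops at the regression layer; a plausible completion wraps it in a DNN and uses the num_epochs defined above. It assumes feature_vector_train is already shaped [N, 128, 1, 1] as the input layer expects, and the run_id is a placeholder.

# Hypothetical completion: train the face classifier defined above.
model = tflearn.DNN(output, tensorboard_verbose=0)
model.fit(feature_vector_train, labels_train, n_epoch=num_epochs,
          show_metric=True, run_id='face_mlp')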
def test1(x_train, y_train, x_test, y_test):
    # Train using classifier
    # Define network
    net_input = input_data(shape=[None, 224, 5, 5, 1])
    conv1 = conv_3d(net_input, 24, [24, 3, 3], padding='VALID',
                    strides=[1, 1, 1, 1, 1], activation='prelu', weight_decay=0.05)
    print('conv1', conv1.get_shape().as_list())
    batch_norm = batch_normalization(conv1)
    # act1 = tflearn.activations.relu(batch_norm)
    # pool1 = max_pool_3d(act1, [1, 1, 2, 2, 1], strides=[1, 1, 1, 1, 1])
    conv2 = conv_3d(batch_norm, 12, [24, 3, 3], padding='VALID',
                    strides=[1, 1, 1, 1, 1], activation='prelu', weight_decay=0.05)
    print('conv2', conv2.get_shape().as_list())
    batch_norm = batch_normalization(conv2)
    # act = tflearn.activations.relu(batch_norm)
    # pool2 = max_pool_3d(act, [1, 1, 2, 2, 1], strides=[1, 1, 1, 1, 1])
    net = residual_block_concat(batch_norm, 2, 16, batch_norm=None,
                                downsample_strides=1, weight_decay=0.05)
    # net = residual_block(net, 5, 16)
    # net = residual_block(net, 1, 32)
    # net = residual_block(net, 4, 32)
    # net = residual_block(net, 1, 64, downsample=True)
    # net = residual_block(net, 2, 64)
    net = tflearn.batch_normalization(net)
    net = tflearn.activation(net, 'relu')
    '''
    conv3 = conv_3d(batch_norm, 24, [24, 1, 1], padding='VALID',
                    strides=[1, 5, 1, 1, 1], activation='prelu')
    print('conv3', conv3.get_shape().as_list())
    batch_norm = batch_normalization(conv3)
    # act = tflearn.activations.relu(batch_norm)
    # pool3 = max_pool_3d(act, [1, 1, 2, 2, 1], strides=[1, 1, 1, 1, 1])
    '''
    flat = flatten(net)
    print('flat', flat.get_shape().as_list())
    ip1 = fully_connected(flat, 100, activation='prelu')
    dro = dropout(ip1, 0.9)
    ip2 = fully_connected(dro, 20, activation='softmax')
    network = regression(ip2, optimizer='Adagrad',
                         loss='categorical_crossentropy', learning_rate=0.01)
    model = tflearn.DNN(network, tensorboard_verbose=0,
                        tensorboard_dir="./tflearn_logs/")
    model.fit(x_train, y_train, n_epoch=200, shuffle=True,
              validation_set=(x_test, y_test), show_metric=True,
              batch_size=32, run_id='3d_net')
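test1 trains but never reports a final score; a small hedged follow-up that could sit after the fit call (placement and batch size are assumptions).

# Hypothetical follow-up inside test1, after model.fit(...):
score = model.evaluate(x_test, y_test, batch_size=32)
print('Test accuracy:', score[0])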
# (continuation of a conv_2d call whose opening line is not shown)
                  3, activation='relu', regularizer="L2", weight_decay=0.0001)
network = max_pool_2d(network, 2)
network = conv_2d(network, 16, 5, activation='relu',
                  regularizer="L2", weight_decay=0.0001)
network = max_pool_2d(network, 2)

# A fully connected layer with regularization (batch normalization and dropout)
network = fully_connected(network, 512, activation='relu')
network = batch_normalization(network)
network = dropout(network, 0.7)

# The climate proxies are processed in a separate "branch" and then merged into the network
cbranch = fully_connected(in_climate, 64, activation='relu', regularizer='L2')
cbranch = fully_connected(cbranch, 64, activation='relu', regularizer='L2')
cbranch = fully_connected(cbranch, 64, activation='relu', regularizer='L2')
network = merge([network, cbranch], 'concat')  # merge the climate input into the network

# A series of fully connected layers:
network = fully_connected(network, 512, activation='relu')
network = batch_normalization(network)
network = dropout(network, 0.7)
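The climate branch above consumes an in_climate tensor defined before this excerpt. A minimal sketch of how such a two-input graph is typically declared and fed in tflearn; the shapes and layer names are assumptions.

# Hypothetical input declarations for the two-branch network above.
network = input_data(shape=[None, 64, 64, 1], name='images')  # image patches
in_climate = input_data(shape=[None, 12], name='climate')     # climate proxies
# ... layers as above ...
# With several input_data layers, tflearn's fit takes one array per input:
# model.fit([X_images, X_climate], Y, n_epoch=50, show_metric=True)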
test_img = np.asarray(test_img, dtype=np.int64)

CNN = input_data(shape=[None, 224, 224, 3], name="input_x")
CNN = conv_2d(CNN, 32, 7, activation='relu', regularizer="L2")
CNN = avg_pool_2d(CNN, 2)
CNN = dropout(CNN, keep_prob=0.5)
CNN = conv_2d(CNN, 45, 5, activation='relu', regularizer="L2")
CNN = avg_pool_2d(CNN, 2)
CNN = dropout(CNN, keep_prob=0.5)
CNN = conv_2d(CNN, 10, 2, activation='relu', regularizer='L2')
CNN = avg_pool_2d(CNN, 2)
# A single-unit output trained with binary cross-entropy needs a sigmoid;
# softmax over one unit would always output 1.0.
fl = fully_connected(CNN, 1, activation='sigmoid')
output = regression(fl, learning_rate=0.0005,
                    loss='binary_crossentropy', name='targets')
model = tflearn.DNN(output, tensorboard_verbose=0, tensorboard_dir='./walk_run',
                    checkpoint_path='./walk_run/checkpoint')
model.fit({'input_x': train_img}, {'targets': train_label},
          show_metric=True, n_epoch=20, batch_size=600)
model.evaluate({'input_x': test_img}, {'targets': test_label})
# Real-time data preprocessing
img_prep = ImagePreprocessing()
img_prep.add_featurewise_zero_center()
img_prep.add_featurewise_stdnorm()

# Real-time data augmentation
img_aug = ImageAugmentation()
img_aug.add_random_flip_leftright()
img_aug.add_random_flip_updown()
img_aug.add_random_rotation(max_angle=25.)

# Convolutional network building
network = input_data(shape=[None, 80, 80, 3],
                     data_preprocessing=img_prep,
                     data_augmentation=img_aug)
network = conv_2d(network, 32, 3, activation='relu')
network = max_pool_2d(network, 2)
network = conv_2d(network, 64, 3, activation='relu')
network = conv_2d(network, 64, 3, activation='relu')
network = max_pool_2d(network, 2)
network = fully_connected(network, 512, activation='relu')
network = dropout(network, 0.5)
network = fully_connected(network, 2, activation='softmax')
network = regression(network, optimizer='adam',
                     loss='categorical_crossentropy', learning_rate=0.001)

# Define model
model = tflearn.DNN(network, tensorboard_verbose=0)
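A hedged training call for the 80x80 binary classifier above; the dataset arrays, epoch count, and batch size are placeholders. The preprocessing and augmentation attached to the input layer run automatically during fit.

# Hypothetical training step; X/Y are placeholder 80x80x3 images and
# one-hot labels.
model.fit(X, Y, n_epoch=50, shuffle=True, validation_set=0.1,
          show_metric=True, batch_size=96, run_id='cnn_80x80')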