def __init__(self):
    """Initialize the video stream, face/landmark detectors, and the
    pretrained emotion-recognition model.

    Side effects: opens the configured camera device and loads model
    weights from disk (prints an error when the weights file is missing).
    """
    # Video input and face-detection helpers.
    self.video_stream = cv2.VideoCapture(VIDEO_PREDICTOR.camera_source)
    self.face_detector = cv2.CascadeClassifier(
        VIDEO_PREDICTOR.face_detection_classifier)
    # FIX: removed the dead store ``self.shape_predictor = None`` that
    # was immediately overwritten by the real predictor.
    self.shape_predictor = dlib.shape_predictor(DATASET.shape_predictor_path)

    # Build the network in its own graph so it does not clash with any
    # default-graph state created elsewhere.
    with tf.Graph().as_default():
        network = build_model(
            use_landmarks=True,
            use_hog_and_landmarks=True,
            use_hog_sliding_window_and_landmarks=True)
        model = DNN(network)
        if os.path.isfile(TRAINING.save_model_path_landmarks_hog_sw):
            model.load(TRAINING.save_model_path_landmarks_hog_sw)
        else:
            print("Error: file '{}' not found".format(
                TRAINING.save_model_path_landmarks_hog_sw))
        self.model = model

    # Bookkeeping about the most recent prediction (used for
    # throttling/display by the caller).
    self.last_predicted_time = 0
    self.last_predicted_confidence = 0
    self.last_predicted_emotion = ""
def _build_network(self):
    """Assemble the FireNet-style CNN and wrap it in a tflearn DNN.

    The resulting estimator is stored on ``self.cnn_``; nothing is
    returned.
    """
    self.logger.info('Started CNN structure construction')

    # Input: batches of height x width RGB images.
    net = input_data(shape=[None, self.height, self.width, 3],
                     dtype=float32)

    # Stage 1: aggressive spatial reduction (stride-4 conv).
    net = conv_2d(net, 64, 5, strides=4, activation='relu')
    net = max_pool_2d(net, 3, strides=2)
    net = local_response_normalization(net)

    # Stage 2.
    net = conv_2d(net, 128, 4, activation='relu')
    net = max_pool_2d(net, 3, strides=2)
    net = local_response_normalization(net)

    # Stage 3: 1x1 conv acts as a channel mixer.
    net = conv_2d(net, 256, 1, activation='relu')
    net = max_pool_2d(net, 3, strides=2)
    net = local_response_normalization(net)

    # Classifier head: two wide tanh layers with dropout, then a
    # 2-way softmax (fire / no fire).
    net = fully_connected(net, 4096, activation='tanh')
    net = dropout(net, 0.5)
    net = fully_connected(net, 4096, activation='tanh')
    net = dropout(net, 0.5)
    net = fully_connected(net, 2, activation='softmax')
    net = regression(net, optimizer='momentum',
                     loss='categorical_crossentropy',
                     learning_rate=0.001)

    self.cnn_ = DNN(net, checkpoint_path='firenet',
                    max_checkpoints=1, tensorboard_verbose=2)
    self.logger.info('Finished CNN structure construction')
def create_model(net, model_type, save_model):
    """Finalize ``net`` with a regression layer and wrap it in a DNN.

    When ``save_model`` is true, the estimator checkpoints under the
    path derived from ``model_type``; otherwise no checkpointing is set.
    """
    net = regression(net, optimizer='adam',
                     loss='categorical_crossentropy', name='output')
    if not save_model:
        return DNN(net)
    return DNN(net, checkpoint_path=get_checkpoint_path(model_type))
def load_model(self):
    """Restore the trained classifier from disk into ``self.model``."""
    # Rebuild the network in a fresh graph so loading is isolated from
    # any existing default-graph state.
    with tf.Graph().as_default():
        self.create_network()
        restored = DNN(self.network, tensorboard_verbose=0)
        restored.load("./classifier/models/classifier.tflearn")
        self.model = restored
def load_model():
    """Build the network and load pretrained weights from disk.

    Returns the tflearn DNN wrapper; if the weights file is missing, an
    error is printed and the unloaded model is returned.
    """
    # FIX: converted Python-2 ``print`` statements to print() calls —
    # the originals are syntax errors under Python 3. Output unchanged.
    model = None
    with tf.Graph().as_default():
        print("loading pretrained model...")
        network = build_model()
        model = DNN(network)
        if os.path.isfile(TRAINING.save_model_path):
            model.load(TRAINING.save_model_path)
        else:
            print("Error: file '{}' not found".format(TRAINING.save_model_path))
    return model
def setup(self):
    """Build the classification network; load saved weights when they
    exist, otherwise train from scratch and persist the result."""
    ops.reset_default_graph()

    # Feed-forward classifier sized by the training data.
    net = input_data(shape=[None, len(self.training[0])])
    net = fully_connected(net, 10)
    net = fully_connected(net, 10)
    net = fully_connected(net, len(self.output[0]), activation="softmax")
    net = regression(net)
    self.model = DNN(net)

    # TF checkpoints materialize as a ``.index`` file; its presence
    # means a trained model is already on disk.
    if exists("src/models/model.tflearn.index"):
        self.model.load("src/models/model.tflearn")
    else:
        self.model.fit(self.training, self.output, n_epoch=1000,
                       batch_size=8, show_metric=True)
        self.model.save("src/models/model.tflearn")
def load_model():
    """Load the pretrained landmarks + HOG (sliding-window) model.

    Returns the DNN; when the weights file is absent, an error message
    is printed and the unloaded model is returned.
    """
    model = None
    with tf.Graph().as_default():
        print("loading pretrained model...")
        net = build_model(use_landmarks=True,
                          use_hog_and_landmarks=True,
                          use_hog_sliding_window_and_landmarks=True)
        model = DNN(net)
        weights_path = TRAINING.save_model_path_landmarks_hog_sw
        if os.path.isfile(weights_path):
            model.load(weights_path)
        else:
            print("Error: file '{}' not found".format(
                weights_path))
    return model
def tflearn_cifar():
    """Train a small CNN image classifier on CIFAR-10.

    Loads and shuffles the dataset, applies featurewise normalization
    and light augmentation, then fits the network for 50 epochs.
    """
    (X_train, Y_train), (X_test, Y_test) = cifar10.load_data()
    X_train, Y_train = shuffle(X_train, Y_train)
    Y_train = to_categorical(Y_train, nb_classes=10)
    Y_test = to_categorical(Y_test, nb_classes=10)

    # Zero-center and STD-normalize using dataset-wide statistics.
    img_prep = ImagePreprocessing()
    img_prep.add_featurewise_zero_center()
    img_prep.add_featurewise_stdnorm()

    # Augment with random horizontal flips and random rotations.
    img_aug = ImageAugmentation()
    img_aug.add_random_flip_leftright()
    img_aug.add_random_rotation(max_angle=25.)

    # Network definition.
    net = input_data(shape=(None, 32, 32, 3),
                     data_preprocessing=img_prep,
                     data_augmentation=img_aug)
    net = conv_2d(net, 32, 3, activation="relu")
    net = max_pool_2d(net, 2)
    net = conv_2d(net, 64, 3, activation="relu")
    net = conv_2d(net, 64, 3, activation="relu")
    net = max_pool_2d(net, 2)
    net = fully_connected(net, 512, activation="relu")
    net = dropout(net, 0.5)
    net = fully_connected(net, 10, activation="softmax")
    net = regression(net, optimizer="adam",
                     loss="categorical_crossentropy",
                     learning_rate=0.001)

    # Training.
    model = DNN(net, tensorboard_verbose=0)
    model.fit(X_train, Y_train, n_epoch=50, shuffle=True,
              validation_set=(X_test, Y_test), show_metric=True,
              batch_size=96, run_id="cifar10_cnn")
def createModel(self):
    """Build the 4-stage CNN classifier and store it on ``self.model``.

    Filter counts scale with the configured sequence length; the input
    is a single-channel square image.
    """
    print("Creating model...")
    imageSize = self.config.sequenceLength
    nbClasses = self.config.nbClasses
    net = input_data(shape=[None, imageSize, imageSize, 1], name='input')
    # FIX: ``imageSize / 2`` is a float under Python 3, but the filter
    # count must be an integer — use floor division.
    net = conv_2d(net, imageSize // 2, 2, activation='elu',
                  weights_init="Xavier", name='conv1')
    net = max_pool_2d(net, 2)
    net = conv_2d(net, imageSize, 2, activation='elu',
                  weights_init="Xavier", name='conv2')
    net = max_pool_2d(net, 2)
    net = conv_2d(net, imageSize*2, 2, activation='elu',
                  weights_init="Xavier", name='conv3')
    net = max_pool_2d(net, 2)
    net = conv_2d(net, imageSize*4, 2, activation='elu',
                  weights_init="Xavier", name='conv4')
    net = max_pool_2d(net, 2)
    net = fully_connected(net, imageSize*8, activation='elu')
    net = dropout(net, 0.5)
    net = fully_connected(net, nbClasses, activation='softmax')
    net = regression(net, optimizer='rmsprop',
                     loss='categorical_crossentropy')
    self.model = DNN(net)
    print("Model created!")
def classify(self, sentence, model_output: tflearn.DNN, user_id="default") -> list:
    """Predict the intent of ``sentence`` and build a chatbot response.

    Returns ``[response, tag, context, confidence]`` for the best
    matching intent, or a fallback message when the model reports two
    or more low-confidence candidates.
    """
    raw = model_output.predict([self.bow(sentence, self.words)])[0]

    # Keep only predictions above the error threshold, best first.
    scored = sorted(
        ([idx, prob] for idx, prob in enumerate(raw)
         if prob > self.ERROR_THRESHOLD),
        key=lambda pair: pair[1], reverse=True)
    return_list = [(self.classes[idx], prob) for idx, prob in scored]

    # Collect scores under 0.5; two or more means the model is unsure.
    msgError = [np.array(prob) for _, prob in return_list
                if np.array(prob) < 0.50]
    if len(msgError) > 1:
        msgError.sort(reverse=True)
        return ['Noel masih belajar nih, bisa diperjelas lagi pertanyaan nya?',
                'error', '', msgError[0].tolist()]

    # Look up the winning intent and assemble the reply.
    context = {}
    for intent in self.intents["intents"]:
        if intent["tag"] == return_list[0][0]:
            context[user_id] = intent["context_set"]
            output_var = [random.choice(intent['responses']),
                          intent['tag'],
                          context[user_id]]
            output_var.append(return_list[0][1].tolist())
    return output_var
def create_model(net):
    """Finalize ``net`` with an Adam-optimized regression layer and
    return the wrapping DNN estimator."""
    finalized = regression(net, optimizer='adam',
                           loss='categorical_crossentropy', name='output')
    return DNN(finalized)
def create_model(self):
    """Wrap the previously built algorithm in a tflearn DNN.

    ``self.build_algorithm`` must have been called first; when the
    algorithm is missing, a warning is logged and no model is created.
    """
    self.log_named("model creation started")
    # Guard clause: nothing to wrap without a built algorithm.
    if self.algorithm is None:
        self.log_named_warning(
            "model was not created, because algorithm is None!")
        return
    self.model = DNN(self.algorithm,
                     checkpoint_path=self.checkpoints_dir_path,
                     max_checkpoints=1,
                     tensorboard_verbose=3,
                     tensorboard_dir=self.learn_logs_dir_path)
    self.log_named("model creation finished")
def _create_model():
    """Build a tiny 5-input binary classifier and return its DNN."""
    reset_default_graph()
    layers = input_data([None, 5])
    layers = fully_connected(layers, N_HIDDEN_UNITS,
                             bias=True, activation='tanh')
    layers = fully_connected(layers, 2, activation='softmax')
    layers = regression(layers, optimizer='adam', learning_rate=0.001,
                        loss='categorical_crossentropy')
    return DNN(layers)
def BotANN():
    """Build the chatbot ANN: two hidden layers of 8 units over a
    bag-of-words input, with a softmax over the known classes."""
    tf.reset_default_graph()
    layers = input_data(shape=[None, len(temi)])
    layers = fully_connected(layers, 8)
    layers = fully_connected(layers, 8)
    layers = fully_connected(layers, len(classi), activation='softmax')
    layers = regression(layers)
    return DNN(layers, tensorboard_dir='logs')
def create_modelTF():
    """Build the 5-input regression network (25 relu units -> 1 linear
    output, mean-square loss) and return its DNN."""
    net = input_data(shape=[None, 5, 1], name='input')
    net = fully_connected(net, 25, activation='relu')
    net = fully_connected(net, 1, activation='linear')
    net = regression(net, optimizer='adam', learning_rate=1e-2,
                     loss='mean_square', name='target')
    return DNN(net, tensorboard_dir='log')
def train(self):
    """Run the full training pipeline: gather images, build the
    dataset, preprocess, construct the network, fit, and save."""
    # Collect image filenames and initialize the numpy containers.
    self.build_image_filenames_list()
    self.init_np_variables()

    # Label cow images as class 0 and non-cow images as class 1.
    self.add_tf_dataset(self.list_cow_files, 0)
    self.add_tf_dataset(self.list_noncow_files, 1)
    self.process_tf_dataset()

    # Image preprocessing and network topology.
    self.setup_image_preprocessing()
    self.setup_nn_network()

    # Fit the network and persist the trained model.
    tf_model = DNN(self.tf_network, tensorboard_verbose=3,
                   checkpoint_path='model_cows.tfl.ckpt')
    tf_model.fit(self.tf_x, self.tf_y, n_epoch=100, shuffle=True,
                 validation_set=(self.tf_x_test, self.tf_y_test),
                 show_metric=True, batch_size=96,
                 snapshot_epoch=True, run_id='model_cows')
    tf_model.save('model_cows.tflearn')
def __build_model(self):
    """
    Composes a feed-forward network that predicts a student's level
    (10 classes) from three grade inputs.

    :return: The neural network model.
    """
    tf.compat.v1.reset_default_graph()
    layers = input_data(shape=[None, 3])
    # Two hidden layers of 32 units each.
    layers = fully_connected(layers, 32)
    layers = fully_connected(layers, 32)
    layers = fully_connected(layers, 10, activation='softmax')
    layers = regression(layers)
    return DNN(layers)
def _create_model(self):
    """Build the word-embedding + LSTM binary classifier and return its
    DNN estimator."""
    reset_default_graph()
    layers = input_data([None, SEQUENCE_LEN])
    # Learn word vectors over the fitted vocabulary.
    layers = embedding(layers, input_dim=len(self._vocab.vocabulary_),
                       output_dim=WORD_FEATURE_DIM)
    layers = lstm(layers, DOC_FEATURE_DIM, dropout=0.8)
    layers = fully_connected(layers, 2, activation='softmax')
    layers = regression(layers, optimizer='adam', learning_rate=0.001,
                        loss='categorical_crossentropy')
    return DNN(layers)
def build(self, checkpoint_path, max_checkpoints, tensorboard_verbose):
    """Wrap ``self.network`` in a tflearn DNN estimator.

    Args:
        checkpoint_path (str): where checkpoints are written.
        max_checkpoints (int): how many checkpoints to keep.
        tensorboard_verbose (int): tensorboard logging level.

    Returns
        tflearn.DNN: A tflearn DNN object that can be used as an
        estimator.
    """
    estimator = DNN(self.network,
                    checkpoint_path=checkpoint_path,
                    max_checkpoints=max_checkpoints,
                    tensorboard_verbose=tensorboard_verbose)
    return estimator
def createModel(self):
    """Build an LSTM sequence classifier from the configured dimensions
    and store it on ``self.model``."""
    print("Creating model...")
    cfg = self.config
    net = input_data(shape=[None, cfg.sequenceLength, cfg.nFeatures])
    net = lstm(net, n_units=cfg.nFeatures, dropout=0.8)
    net = fully_connected(net, cfg.nbClasses, activation='softmax')
    net = regression(net)
    self.model = DNN(net)
    print("Model created!")
def model(size=32, color_channels=3):
    """Build a small CNN for ``size`` x ``size`` images with
    ``color_channels`` channels; returns a DNN with a 10-way softmax."""
    # Two 5x5 conv layers, then alternating 3x3 convs and pooling.
    cnn = input_data((None, size, size, color_channels))
    cnn = conv_2d(cnn, 16, 5, activation='relu')
    cnn = conv_2d(cnn, 16, 5, activation='relu')
    cnn = max_pool_2d(cnn, 2)
    cnn = conv_2d(cnn, 20, 3, activation='relu')
    cnn = max_pool_2d(cnn, 2)
    cnn = conv_2d(cnn, 20, 3, activation='relu')
    cnn = max_pool_2d(cnn, 2)
    # Dense head with dropout (keep probability 0.9).
    cnn = fully_connected(cnn, 1024, 'relu')
    cnn = dropout(cnn, .9)
    cnn = fully_connected(cnn, 10, 'softmax')
    cnn = regression(cnn)
    return DNN(cnn, tensorboard_verbose=3)
def load_model():
    """Load the pretrained model, report validation/test accuracy, and
    return the model."""
    model = None
    with tf.Graph().as_default():
        print("loading pretrained model...")
        data, validation, test = load_data(validation=True, test=True)
        network = build_model()
        model = DNN(network)
        if os.path.isfile(TRAINING.save_model_path):
            model.load(TRAINING.save_model_path)
        else:
            print("Error: file '{}' not found".format(TRAINING.save_model_path))

        # Evaluate on both held-out splits and report timings.
        print("--")
        print("Validation samples: {}".format(len(validation['Y'])))
        print("Test samples: {}".format(len(test['Y'])))
        print("--")
        print("evaluating...")
        start_time = time.time()
        validation_accuracy = evaluate(model, validation['X'],
                                       validation['X2'], validation['Y'])
        print(" - validation accuracy = {0:.1f}".format(validation_accuracy * 100))
        test_accuracy = evaluate(model, test['X'], test['X2'], test['Y'])
        print(" - test accuracy = {0:.1f}".format(test_accuracy * 100))
        print(" - evalution time = {0:.1f} sec".format(time.time() - start_time))
    return model
def train(self, X_train, Y_train, X_val, Y_val):
    """Build the CNN and, when ``self.is_training`` is set, fit it on
    the supplied 48x48 single-channel faces and save the weights.

    Args:
        X_train, Y_train: training images and one-hot labels.
        X_val, Y_val: validation images and one-hot labels.
    """
    with tf.Graph().as_default():
        print("Building Model...........")
        network = build_CNN()
        model = DNN(network,
                    tensorboard_dir="path_to_logs",
                    tensorboard_verbose=0,
                    checkpoint_path="path_to_checkpoints",
                    max_checkpoints=1)
        if self.is_training:
            # Training phase
            print("start training...")
            print(" - emotions = {}".format(7))
            print(" - optimizer = '{}'".format(self.optimizer))
            print(" - learning_rate = {}".format(0.016))
            print(" - learning_rate_decay = {}".format(self.learning_rate_decay))
            # FIX: message previously read "otimizer_param".
            print(" - optimizer_param ({}) = {}".format(self.optimizer, self.optimizer_param))
            print(" - Dropout = {}".format(self.dropout))
            print(" - epochs = {}".format(self.epochs))

            start_time = time.time()
            # Images are flattened on input; restore the 48x48x1 shape.
            model.fit({'input': X_train.reshape(-1, 48, 48, 1)},
                      {'output': Y_train},
                      validation_set=({'input': X_val.reshape(-1, 48, 48, 1)},
                                      {'output': Y_val}),
                      batch_size=128,
                      n_epoch=10,
                      show_metric=True,
                      snapshot_step=100)
            training_time = time.time() - start_time
            print("training time = {0:.1f} sec".format(training_time))
            print("saving model...")
            model.save("saved_model.bin")
def createModel(self):
    """Build a 3-hidden-layer ELU classifier over the configured
    feature vector and store it on ``self.model``."""
    print("Creating model...")
    nFeatures = self.config.nFeatures
    nbClasses = self.config.nbClasses
    net = input_data(shape=[None, nFeatures])
    # Widening ELU stack: 32 -> 64 -> 128 units.
    for width in (32, 64, 128):
        net = fully_connected(net, width, activation='elu',
                              weights_init="Xavier")
    net = fully_connected(net, nbClasses, activation='softmax')
    net = regression(net)
    self.model = DNN(net, tensorboard_verbose=3)
    print("Model created!")
def chat(model: tflearn.DNN, data: T.Dict[str, T.Any], words: T.List, labels: T.List) -> None:
    """
    Initialize a discussion with a user and respond to user input with
    trained model.
    """
    print("Start speaking with me! Enter Q to quit")
    while True:
        inp = input("You: ")
        # FIX: the original also compared ``inp.lower() == "Q"``, which
        # can never be true for a lowercased string; one check covers
        # both "q" and "Q".
        if inp.lower() == "q":
            break

        # Predict the most likely intent for the input.
        results = model.predict([bag_of_words(inp, words)])
        results_index = numpy.argmax(results)
        tag = labels[results_index]

        # Answer with a random response from the matching intent.
        for tg in data["intents"]:
            if tg['tag'] == tag:
                responses = tg['responses']
        print(random.choice(responses))
def use_tflearn(x_train, y_train, x_test, y_test):
    """Train a 4-hidden-layer sigmoid DNN on the given data, save it to
    disk, and return the fitted model."""
    net = input_data(shape=[None, x_train.shape[1]], name='input')
    # Funnel-shaped sigmoid stack: 24 -> 16 -> 12 -> 8 units.
    for width in (24, 16, 12, 8):
        net = fully_connected(net, width, activation='sigmoid',
                              bias_init='normal')
    net = regression(net)
    model = DNN(net,
                tensorboard_dir=TENSORBOARD_DIR.as_posix(),
                tensorboard_verbose=3,
                best_checkpoint_path=CHECKPOINT_PATH.as_posix())
    model.fit(x_train, y_train, validation_set=(x_test, y_test),
              n_epoch=100, batch_size=10, show_metric=True,
              run_id='DNN-4f')
    model.save(MODEL_FILE.as_posix())
    return model
# -*- coding: utf-8 -*-
"""
Source : https://towardsdatascience.com/tflearn-soving-xor-with-a-2x2x1-feed-forward-neural-network-6c07d88689ed

Trains a 2x2x1 feed-forward network to learn XOR and saves the model.
"""
from tflearn import DNN
from tflearn.layers.core import input_data, dropout, fully_connected
from tflearn.layers.estimator import regression

# Training examples: the XOR truth table.
X = [[0, 0], [0, 1], [1, 0], [1, 1]]
Y = [[0], [1], [1], [0]]

input_layer = input_data(shape=[None, 2])  # input layer of size 2
hidden_layer = fully_connected(input_layer, 2, activation='tanh')  # hidden layer of size 2
output_layer = fully_connected(hidden_layer, 1, activation='tanh')  # output layer of size 1

# Use Stochastic Gradient Descent and Binary Crossentropy as loss function.
# FIX: assign the estimator layer to a new name instead of rebinding the
# imported ``regression`` function, which shadowed it for any later use.
net = regression(output_layer, optimizer='sgd',
                 loss='binary_crossentropy', learning_rate=5)
model = DNN(net)

# Fit the model.
model.fit(X, Y, n_epoch=5000, show_metric=True)

# Predict all examples (outputs > 0 count as "true").
print('Expected: ', [i[0] > 0 for i in Y])
print('Predicted: ', [i[0] > 0 for i in model.predict(X)])

model.get_weights(hidden_layer.W)
model.get_weights(output_layer.W)

model.save("tflearn-xor")
fully_connected(net[i], 1, activation='sigmoid') for i in range(len(net)) ] net = merge(net, mode='concat') print("After RNN : ", net.get_shape().as_list()) print("After Dropout : ", net.get_shape().as_list()) net = regression(net, optimizer='adam', loss='binary_crossentropy', learning_rate=0.005) print("After regression : ", net.get_shape().as_list()) testX = trainX[int(0.3 * len(trainY)):] testY = trainY[int(0.3 * len(trainY)):] # Training model = DNN(net, clip_gradients=0., tensorboard_verbose=2) embeddingWeights = get_layer_variables_by_name('EmbeddingLayer')[0] # Assign your own weights (for example, a numpy array [input_dim, output_dim]) model.set_weights(embeddingWeights, embeddings) model.fit(trainX, trainY, n_epoch=3, validation_set=0.1, show_metric=True, batch_size=32, shuffle=True) #print( model.evaluate(testX, testY) ) predictions = model.predict(testX) predictions = prob2Onehot(predictions) #print("Predictions : ", list(predictions[10]))
def train(optimizer=HYPERPARAMS.optimizer,
          optimizer_param=HYPERPARAMS.optimizer_param,
          learning_rate=HYPERPARAMS.learning_rate,
          keep_prob=HYPERPARAMS.keep_prob,
          learning_rate_decay=HYPERPARAMS.learning_rate_decay,
          decay_step=HYPERPARAMS.decay_step,
          train_model=True):
    """Train or evaluate the emotion-recognition network.

    When ``train_model`` is true, fits the model on the training split,
    optionally saves it, and returns the validation accuracy; otherwise
    loads pretrained weights and returns the test accuracy.

    FIX: converted Python-2 ``print`` statements (syntax errors under
    Python 3) to print() calls, and corrected the "otimizer_param"
    typo in the log message. Output content is otherwise unchanged.
    """
    print("loading dataset " + DATASET.name + "...")
    if train_model:
        data, validation = load_data(validation=True)
    else:
        data, validation, test = load_data(validation=True, test=True)

    with tf.Graph().as_default():
        print("building model...")
        network = build_model(optimizer, optimizer_param, learning_rate,
                              keep_prob, learning_rate_decay, decay_step)
        model = DNN(network, tensorboard_dir=TRAINING.logs_dir,
                    tensorboard_verbose=0,
                    checkpoint_path=TRAINING.checkpoint_dir,
                    max_checkpoints=TRAINING.max_checkpoints)
        #tflearn.config.init_graph(seed=None, log_device=False, num_cores=6)

        if train_model:
            # Training phase
            print("start training...")
            print(" - emotions = {}".format(NETWORK.output_size))
            print(" - optimizer = '{}'".format(optimizer))
            print(" - learning_rate = {}".format(learning_rate))
            print(" - learning_rate_decay = {}".format(learning_rate_decay))
            print(" - optimizer_param ({}) = {}".format(
                'beta1' if optimizer == 'adam' else 'momentum',
                optimizer_param))
            print(" - keep_prob = {}".format(keep_prob))
            print(" - epochs = {}".format(TRAINING.epochs))
            print(" - use landmarks = {}".format(NETWORK.use_landmarks))
            print(" - use hog + landmarks = {}".format(
                NETWORK.use_hog_and_landmarks))
            print(" - use hog sliding window + landmarks = {}".format(
                NETWORK.use_hog_sliding_window_and_landmarks))
            print(" - use batchnorm after conv = {}".format(
                NETWORK.use_batchnorm_after_conv_layers))
            print(" - use batchnorm after fc = {}".format(
                NETWORK.use_batchnorm_after_fully_connected_layers))

            start_time = time.time()
            if NETWORK.use_landmarks:
                model.fit([data['X'], data['X2']], data['Y'],
                          validation_set=([validation['X'], validation['X2']],
                                          validation['Y']),
                          snapshot_step=TRAINING.snapshot_step,
                          show_metric=TRAINING.vizualize,
                          batch_size=TRAINING.batch_size,
                          n_epoch=TRAINING.epochs)
            else:
                model.fit(data['X'], data['Y'],
                          validation_set=(validation['X'], validation['Y']),
                          snapshot_step=TRAINING.snapshot_step,
                          show_metric=TRAINING.vizualize,
                          batch_size=TRAINING.batch_size,
                          n_epoch=TRAINING.epochs)
                validation['X2'] = None
            training_time = time.time() - start_time
            print("training time = {0:.1f} sec".format(training_time))

            if TRAINING.save_model:
                print("saving model...")
                model.save(TRAINING.save_model_path)
                # tflearn may write only a ``.meta`` file; rename it so
                # the expected model path exists.
                if not(os.path.isfile(TRAINING.save_model_path)) and \
                        os.path.isfile(TRAINING.save_model_path + ".meta"):
                    os.rename(TRAINING.save_model_path + ".meta",
                              TRAINING.save_model_path)

            print("evaluating...")
            validation_accuracy = evaluate(model, validation['X'],
                                           validation['X2'], validation['Y'])
            print(" - validation accuracy = {0:.1f}".format(
                validation_accuracy * 100))
            return validation_accuracy
        else:
            # Testing phase : load saved model and evaluate on test dataset
            print("start evaluation...")
            print("loading pretrained model...")
            if os.path.isfile(TRAINING.save_model_path):
                model.load(TRAINING.save_model_path)
            else:
                print("Error: file '{}' not found".format(
                    TRAINING.save_model_path))
                exit()

            if not NETWORK.use_landmarks:
                validation['X2'] = None
                test['X2'] = None

            print("--")
            print("Validation samples: {}".format(len(validation['Y'])))
            print("Test samples: {}".format(len(test['Y'])))
            print("--")
            print("evaluating...")
            start_time = time.time()
            validation_accuracy = evaluate(model, validation['X'],
                                           validation['X2'], validation['Y'])
            print(" - validation accuracy = {0:.1f}".format(
                validation_accuracy * 100))
            test_accuracy = evaluate(model, test['X'], test['X2'], test['Y'])
            print(" - test accuracy = {0:.1f}".format(test_accuracy * 100))
            print(" - evalution time = {0:.1f} sec".format(
                time.time() - start_time))
            return test_accuracy
class FireDetector:
    """CNN-based fire detector wrapping a FireNet-style tflearn model."""

    def __init__(self, height=INPUT_HEIGHT, width=INPUT_WIDTH,
                 n_channels=NUMBER_CHANNELS):
        # Expected input geometry for the network.
        self.height = height
        self.width = width
        self.n_channels = n_channels
        self.logger = create_logger('Fire Detector')
        self._build_network()

    def _build_network(self):
        """Assemble the CNN and store the estimator on ``self.cnn_``."""
        self.logger.info('Started CNN structure construction')
        net = input_data(shape=[None, self.height, self.width, 3],
                         dtype=float32)
        # Three conv / pool / LRN stages.
        net = conv_2d(net, 64, 5, strides=4, activation='relu')
        net = max_pool_2d(net, 3, strides=2)
        net = local_response_normalization(net)
        net = conv_2d(net, 128, 4, activation='relu')
        net = max_pool_2d(net, 3, strides=2)
        net = local_response_normalization(net)
        net = conv_2d(net, 256, 1, activation='relu')
        net = max_pool_2d(net, 3, strides=2)
        net = local_response_normalization(net)
        # Dense head with dropout, ending in a 2-way softmax.
        net = fully_connected(net, 4096, activation='tanh')
        net = dropout(net, 0.5)
        net = fully_connected(net, 4096, activation='tanh')
        net = dropout(net, 0.5)
        net = fully_connected(net, 2, activation='softmax')
        net = regression(net, optimizer='momentum',
                         loss='categorical_crossentropy',
                         learning_rate=0.001)
        self.cnn_ = DNN(net, checkpoint_path='firenet',
                        max_checkpoints=1, tensorboard_verbose=2)
        self.logger.info('Finished CNN structure construction')

    def load_weights(self, weights_path):
        """Load pretrained weights (weights only, no graph) into the CNN."""
        self.logger.info('Loading weights...')
        self.cnn_.load(weights_path, weights_only=True)
        self.logger.info('Weights loaded successfully')

    def predict(self, images):
        """Return the first softmax output (index 0) for each image."""
        images = self._ensure_expected_shape(images)
        raw = self.cnn_.predict(images)
        return [scores[0] for scores in raw]

    def _ensure_expected_shape(self, images):
        """Reshape any image whose shape differs from the expected
        (height, width, channels) geometry."""
        target = (self.height, self.width, self.n_channels)
        fixed = []
        for img in images:
            if img.shape != target:
                img = reshape_image(img, self.height, self.width)
            fixed.append(img)
        return fixed