def setup_sentence_model():
    X_train = get_sentence_embeddings(headlines_train)
    X_eval = get_sentence_embeddings(headlines_eval)

    model = Sequential()
    model.add(
        Dense(256,
              input_dim=512,
              activation='relu',
              bias_initializer='zeros',
              kernel_regularizer=regularizers.l2(args.lambd)))
    model.add(Dropout(0.25))
    model.add(Dense(1, activation='sigmoid', bias_initializer='zeros'))
    model.compile(
        optimizer='adam',
        loss='binary_crossentropy',
        metrics=['accuracy', km.binary_precision(), km.binary_recall()])

    # Fit the model
    model.fit(X_train,
              labels_train,
              epochs=200,
              batch_size=args.batch_size,
              callbacks=callbacks,
              validation_data=(X_eval, labels_eval))
    return model
def get_conv_model(input_shape):
    model = Sequential()
    model.add(
        Conv2D(128, (3, 3),
               activation='relu',
               strides=(1, 1),
               padding='same',
               input_shape=input_shape))
    model.add(
        Conv2D(64, (3, 3), activation='relu', strides=(1, 1), padding='same'))
    model.add(
        Conv2D(32, (3, 3), activation='relu', strides=(1, 1), padding='same'))
    model.add(MaxPool2D((2, 2)))
    model.add(Dropout(0.5))
    model.add(Flatten())
    model.add(Dense(128, activation='relu'))
    model.add(Dense(64, activation='relu'))
    model.add(Dense(32, activation='relu'))
    model.add(Dense(1, activation='sigmoid'))
    model.summary()
    model.compile(optimizer='adam',
                  loss='binary_crossentropy',
                  metrics=['acc', km.binary_precision(), km.binary_recall()])
    return model
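# Usage sketch for get_conv_model: a minimal smoke test on random data.
# The input shape, sample count, and epoch count are illustrative
# assumptions, not values from the source.
import numpy as np

smoke_X = np.random.rand(16, 64, 64, 1)          # 16 fake grayscale images
smoke_y = np.random.randint(0, 2, size=(16, 1))  # fake binary labels
smoke_model = get_conv_model(input_shape=(64, 64, 1))
smoke_model.fit(smoke_X, smoke_y, epochs=1, batch_size=8, verbose=0)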
def gruModel(embeddingMatrix, maxDataLength, embeddingVectorLength,
             numAttributes, numNeurons):
    model = Sequential()
    model.add(
        Embedding(input_dim=numAttributes,
                  output_dim=embeddingVectorLength,
                  weights=[embeddingMatrix],
                  input_length=maxDataLength,
                  trainable=False))
    model.add(GRU(numNeurons, return_sequences=False))
    model.add(Dropout(0.2))
    model.add(
        Dense(1,
              activation='sigmoid',
              kernel_regularizer=regularizers.l2(0.001)))
    model.compile(loss='binary_crossentropy',
                  optimizer='adam',
                  metrics=[
                      'accuracy',
                      km.binary_f1_score(),
                      km.binary_precision(),
                      km.binary_recall()
                  ])
    return model
def get_binary_model(input_shape=128):
    model = Sequential()
    model.add(
        Dense(128,
              activation='relu',
              kernel_initializer='random_normal',
              input_dim=input_shape))
    model.add(Dense(256, activation='relu', kernel_initializer='random_normal'))
    model.add(Dropout(0.5))
    model.add(Dense(128, activation='relu', kernel_initializer='random_normal'))
    model.add(Dense(64, activation='relu', kernel_initializer='random_normal'))
    model.add(Dense(32, activation='relu', kernel_initializer='random_normal'))
    model.add(Dropout(0.5))
    model.add(Dense(8, activation='relu', kernel_initializer='random_normal'))
    model.add(
        Dense(1, activation='sigmoid', kernel_initializer='random_normal'))
    model.compile(optimizer='adam',
                  loss='binary_crossentropy',
                  metrics=['acc', km.binary_precision(), km.binary_recall()])
    return model
def load_predict(pre_list):
    # Clear the Keras session to avoid errors on repeated calls
    keras.backend.clear_session()
    BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
    model_path = os.path.join(BASE_DIR, 'recommend', 'model.h5')
    model = load_model(model_path,
                       custom_objects={
                           'binary_precision': km.binary_precision(),
                           'binary_recall': km.binary_recall(),
                           'binary_f1_score': km.binary_f1_score()
                       })
    predictions = model.predict(pre_list)
    print(predictions)
    return predictions
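# Usage sketch for load_predict. The feature array below is a hypothetical
# stand-in: the real input must match the shape the saved model.h5 was
# trained on, which is not specified in the source.
import numpy as np

fake_features = np.random.rand(4, 10)  # 4 samples, 10 features (assumed)
scores = load_predict(fake_features)   # prints and returns the predictions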
def load_object(root_folder, obj_descr_type, model_name, dataset,
                refactoring_name):
    base_name = (root_folder + "/" + obj_descr_type + "-" + model_name + "-" +
                 dataset + "-" + refactoring_name.replace(" ", ""))
    if model_name == 'deep-learning' and obj_descr_type == 'model':
        return keras_load_model(base_name + ".h5",
                                custom_objects={
                                    "binary_precision": binary_precision(),
                                    "binary_recall": binary_recall()
                                })
    return load(base_name + ".joblib")
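# Usage sketch for load_object. All argument values below are hypothetical;
# the function resolves files named
# root_folder/<type>-<model>-<dataset>-<refactoring>.{h5,joblib}.
clf = load_object(root_folder='results',
                  obj_descr_type='model',
                  model_name='deep-learning',
                  dataset='apache',
                  refactoring_name='Extract Method')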
def predict_unseen(model_name, file_info, unseen_files, data_path,
                   threshold=0.5):
    # Load model
    dependencies = {
        'binary_precision': km.binary_precision(),
        'binary_recall': km.binary_recall()
    }
    model = load_model(data_path + 'Models/' + model_name,
                       custom_objects=dependencies)

    # Load data
    test_x = []
    test_y = []
    for j in range(len(unseen_files)):
        # Get the file name and read the data
        file = file_info[file_info.fname == unseen_files[j]].fname.iloc[0]
        audio_data = pickle.load(open(data_path + 'VGG/' + file, 'rb'))
        test_x.append(audio_data)
        # Get the class label, repeated once per audio frame
        label = file_info[file_info.fname == unseen_files[j]].label.iloc[0]
        test_y.append(np.repeat([label], len(audio_data)))

    # Flatten the per-file lists into one array of frames
    test_x = np.array([response for sublist in test_x for response in sublist])
    test_y = np.array([response for sublist in test_y for response in sublist])

    # Make predictions
    y_pred = model.predict(test_x)
    y_pred_th = (y_pred > threshold)

    # Confusion matrix
    cm_test = confusion_matrix(test_y, y_pred_th)
    print(cm_test)
    # Recall
    tpr_test = recall_score(test_y, y_pred_th)
    print('TPR: ', tpr_test)
    # Precision
    precision_test = precision_score(test_y, y_pred_th)
    print('Precision: ', precision_test)
    # F1 score
    fscore_test = f1_score(test_y, y_pred_th)
    print('F1 score: ', fscore_test)
def load_models(folder, streams):
    models_list = []
    for stream in streams:
        print('\nLoading', stream, 'model\n')
        models_list.append(
            load_model(folder + stream + '_best-weights.h5',
                       custom_objects={
                           'binary_precision': keras_metrics.binary_precision(),
                           'binary_recall': keras_metrics.binary_recall()
                       }))
    return models_list
def compile_model(self):
    metrics = [km.binary_precision(0), km.binary_recall(0)]
    self.NeuMF.compile(optimizer=SGD(lr=LEARN_RATE),
                       loss='binary_crossentropy',
                       metrics=metrics)
    # Update history dict
    self.history['precision'] = []
    self.history['recall'] = []
    self.history['val_precision'] = []
    self.history['val_recall'] = []
    self.history['loss'] = []
    self.history['val_loss'] = []
    print('compiling NeuMF Model ...')
def get_custom_activations_dict(filepath=None):
    """
    Import all implemented custom activation functions so they can be used
    when loading a Keras model.

    Parameters
    ----------
    filepath : Optional[str]
        Path to json file containing additional custom objects.
    """
    from snntoolbox.utils.utils import binary_sigmoid, binary_tanh, \
        ClampedReLU, LimitedReLU, NoisySoftplus, precision
    import keras_metrics as km

    # Todo: We should be able to load a different activation for each layer.
    # Need to remove this hack:
    activation_str = 'relu_Q1.4'
    activation = get_quantized_activation_function_from_string(activation_str)

    custom_objects = {
        'binary_sigmoid': binary_sigmoid,
        'binary_tanh': binary_tanh,
        # Todo: This should work regardless of the specific attributes of the
        # ClampedReLU class used during training.
        'clamped_relu': ClampedReLU(),
        'LimitedReLU': LimitedReLU,
        'relu6': LimitedReLU({'max_value': 6}),
        activation_str: activation,
        'Noisy_Softplus': NoisySoftplus,
        'precision': precision,
        'binary_precision': km.binary_precision(label=0),
        'binary_recall': km.binary_recall(label=0),
        'activity_regularizer': keras.regularizers.l1
    }

    if filepath is not None and filepath != '':
        with open(filepath) as f:
            kwargs = json.load(f)
        for key in kwargs:
            if 'LimitedReLU' in key:
                custom_objects[key] = LimitedReLU(kwargs[key])

    return custom_objects
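# Usage sketch: pass the dictionary to Keras when deserializing a model that
# was trained with these custom objects. The model path is a hypothetical
# placeholder.
from keras.models import load_model

custom_objects = get_custom_activations_dict()
restored = load_model('path/to/model.h5', custom_objects=custom_objects)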
def get_recurrent_model(input_shape):
    model = Sequential()
    model.add(LSTM(128, return_sequences=True, input_shape=input_shape))
    model.add(LSTM(128, return_sequences=True))
    model.add(Dropout(0.5))
    model.add(TimeDistributed(Dense(64, activation='relu')))
    model.add(TimeDistributed(Dense(32, activation='relu')))
    model.add(TimeDistributed(Dense(16, activation='relu')))
    model.add(TimeDistributed(Dense(8, activation='relu')))
    model.add(Flatten())
    model.add(Dense(1, activation='sigmoid'))
    model.compile(optimizer='adam',
                  loss='binary_crossentropy',
                  metrics=['acc', km.binary_precision(), km.binary_recall()])
    return model
def build_tf_graph(self):
    """Convert the Keras model to a frozen TensorFlow graph."""
    # Load the Keras model in inference mode
    K.set_learning_phase(0)
    model = load_model(self.keras_model_path,
                       custom_objects={
                           'binary_precision': km.binary_precision(),
                           'binary_recall': km.binary_recall()
                       })
    # Create a frozen graph of the Keras model
    frozen_graph = self.__freeze_session__(
        K.get_session(),
        output_names=[out.op.name for out in model.outputs])
    # Save the model as a .pb file
    tf.train.write_graph(frozen_graph, 'saved_models/tf_model', self.tf_path,
                         as_text=False)
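# Usage sketch: reload the frozen graph written above with the TF1-style API
# the snippet already uses. The .pb file name is a hypothetical stand-in for
# self.tf_path.
import tensorflow as tf

with tf.gfile.GFile('saved_models/tf_model/model.pb', 'rb') as f:
    graph_def = tf.GraphDef()
    graph_def.ParseFromString(f.read())
with tf.Graph().as_default() as graph:
    # Import the frozen graph; tensors keep their original op names.
    tf.import_graph_def(graph_def, name='')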
def main(input_path, model_path):
    model = load_model(model_path,
                       custom_objects={
                           "binary_precision": keras_metrics.binary_precision(),
                           "binary_recall": keras_metrics.binary_recall()
                       })

    allowed_filetypes = [
        '.avi', '.wmv', '.mpg', '.mov', '.mp4', '.mkv', '.3gp', '.webm', '.ogv'
    ]
    videos_path = []
    if os.path.isfile(input_path) and input_path.lower().endswith(
            tuple(allowed_filetypes)):
        videos_path = [input_path]
    elif os.path.isdir(input_path):
        search_path = os.path.join(input_path, '**')
        for ext in allowed_filetypes:
            videos_path.extend(glob2.glob(os.path.join(search_path,
                                                       '*' + ext)))
    else:
        print("Not a valid input.")

    videos_path = natsorted(videos_path)
    for video_path in videos_path:
        folder = os.path.dirname(os.path.abspath(video_path))
        filename = os.path.basename(video_path)
        json_path = os.path.join(folder, filename.split('.')[0] + '.json')
        print(video_path)
        if os.path.isfile(json_path):
            continue
        game_inference(model,
                       video_path,
                       json_path,
                       samples=24,
                       height=224,
                       width=224)
def Lstm(dataInput, maxDataLength):
    train, test = train_test_split(dataInput, test_size=0.2)
    xTrain, yTrain = list(zip(*train))
    xTest, yTest = list(zip(*test))
    yTrain = np.array(yTrain)
    yTest = np.array(yTest)
    xTrain = sequence.pad_sequences(xTrain, maxlen=maxDataLength)
    xTest = sequence.pad_sequences(xTest, maxlen=maxDataLength)

    embedding_vector_length = 32
    model = Sequential()
    model.add(
        Embedding(NUM_OF_ATTRIBUTES,
                  embedding_vector_length,
                  input_length=maxDataLength))
    model.add(Bidirectional(LSTM(NUM_OF_NEURONS, return_sequences=False)))
    model.add(Dropout(0.2))
    model.add(Dense(1, activation='sigmoid'))
    model.compile(loss='binary_crossentropy',
                  optimizer='adam',
                  metrics=[
                      'accuracy',
                      km.binary_f1_score(),
                      km.binary_precision(),
                      km.binary_recall()
                  ])
    model.fit(xTrain,
              yTrain,
              validation_data=(xTest, yTest),
              epochs=NUM_OF_EPOCHS,
              batch_size=512,
              verbose=0)

    scores = model.evaluate(xTest, yTest, verbose=0)
    # Index 0 is the loss; indices 1-4 are accuracy, f1, precision, recall.
    for i in range(1, 5):
        print("%s : %.2f%%" % (model.metrics_names[i], scores[i] * 100))
    print("========================================================")
def Build_Model_CNN_Text(word_index,
                         embedding_index,
                         number_of_classes,
                         MAX_SEQUENCE_LENGTH,
                         EMBEDDING_DIM,
                         sparse_categorical,
                         min_hidden_layer_cnn,
                         max_hidden_layer_cnn,
                         min_nodes_cnn,
                         max_nodes_cnn,
                         random_optimizor,
                         dropout,
                         simple_model=False,
                         _l2=0.01,
                         lr=1e-3):
    """
    Build a CNN model for text classification.

    word_index is the word index and embedding_index is the embeddings index;
    see data_helper.py. number_of_classes is the number of classes,
    MAX_SEQUENCE_LENGTH is the maximum length of the text sequences, and
    EMBEDDING_DIM is the dimension of the word embeddings (see
    data_helper.py). Two variants are supported: simple_model=True builds a
    plain stack of Conv1D layers whose depth is drawn at random from the
    configured range, while simple_model=False builds a more complex
    multi-branch CNN with several parallel filter sizes.
    """
    model = Sequential()
    if simple_model:
        embedding_matrix = np.zeros((len(word_index) + 1, EMBEDDING_DIM))
        for word, i in word_index.items():
            embedding_vector = embedding_index.get(word)
            if embedding_vector is not None:
                embedding_matrix[i] = embedding_vector
            else:
                # Words not found in the embedding index get the 'UNK' vector.
                embedding_matrix[i] = embedding_index['UNK']
        model.add(
            Embedding(len(word_index) + 1,
                      EMBEDDING_DIM,
                      weights=[embedding_matrix],
                      input_length=MAX_SEQUENCE_LENGTH,
                      trainable=True))
        values = list(range(min_nodes_cnn, max_nodes_cnn))
        Layer = random.choice(
            list(range(min_hidden_layer_cnn, max_hidden_layer_cnn)))
        for i in range(0, Layer):
            Filter = random.choice(values)
            model.add(
                Conv1D(Filter, 5, activation='relu',
                       kernel_regularizer=l2(_l2)))
            model.add(Dropout(dropout))
            model.add(MaxPooling1D(5))
        model.add(Flatten())
        Filter = random.choice(values)
        model.add(Dense(Filter, activation='relu',
                        kernel_regularizer=l2(_l2)))
        model.add(Dropout(dropout))
        Filter = random.choice(values)
        model.add(Dense(Filter, activation='relu',
                        kernel_regularizer=l2(_l2)))
        model.add(Dropout(dropout))
        if number_of_classes == 2:
            model.add(Dense(1, activation='sigmoid',
                            kernel_regularizer=l2(_l2)))
            model_tmp = model
            model.compile(loss='binary_crossentropy',
                          optimizer=optimizors(random_optimizor, lr),
                          metrics=[
                              'accuracy',
                              km.binary_precision(),
                              km.binary_recall(),
                              km.binary_f1_score(),
                              km.binary_true_positive(),
                              km.binary_true_negative(),
                              km.binary_false_positive(),
                              km.binary_false_negative()
                          ])
        else:
            model.add(
                Dense(number_of_classes,
                      activation='softmax',
                      kernel_regularizer=l2(_l2)))
            model_tmp = model
            if sparse_categorical:
                model.compile(loss='sparse_categorical_crossentropy',
                              optimizer=optimizors(random_optimizor, lr),
                              metrics=[
                                  'accuracy',
                                  km.sparse_categorical_precision(),
                                  km.sparse_categorical_recall(),
                                  km.sparse_categorical_f1_score(),
                                  km.sparse_categorical_true_positive(),
                                  km.sparse_categorical_true_negative(),
                                  km.sparse_categorical_false_positive(),
                                  km.sparse_categorical_false_negative()
                              ])
            else:
                model.compile(loss='categorical_crossentropy',
                              optimizer=optimizors(random_optimizor, lr),
                              metrics=[
                                  'accuracy',
                                  km.categorical_precision(),
                                  km.categorical_recall(),
                                  km.categorical_f1_score(),
                                  km.categorical_true_positive(),
                                  km.categorical_true_negative(),
                                  km.categorical_false_positive(),
                                  km.categorical_false_negative()
                              ])
    else:
        embedding_matrix = np.zeros((len(word_index) + 1, EMBEDDING_DIM))
        for word, i in word_index.items():
            embedding_vector = embedding_index.get(word)
            if embedding_vector is not None:
                embedding_matrix[i] = embedding_vector
            else:
                # Words not found in the embedding index get the 'UNK' vector.
                embedding_matrix[i] = embedding_index['UNK']
        embedding_layer = Embedding(len(word_index) + 1,
                                    EMBEDDING_DIM,
                                    weights=[embedding_matrix],
                                    input_length=MAX_SEQUENCE_LENGTH,
                                    trainable=True)

        # Applying a more complex convolutional approach
        convs = []
        values_layer = list(range(min_hidden_layer_cnn, max_hidden_layer_cnn))
        filter_sizes = []
        layer = random.choice(values_layer)
        print("Filter ", layer)
        for fl in range(0, layer):
            filter_sizes.append((fl + 2))

        values_node = list(range(min_nodes_cnn, max_nodes_cnn))
        node = random.choice(values_node)
        print("Node ", node)

        sequence_input = Input(shape=(MAX_SEQUENCE_LENGTH, ), dtype='int32')
        embedded_sequences = embedding_layer(sequence_input)

        # One convolutional branch per filter size
        for fsz in filter_sizes:
            l_conv = Conv1D(node, kernel_size=fsz,
                            activation='relu')(embedded_sequences)
            l_pool = MaxPooling1D(5)(l_conv)
            # l_pool = Dropout(0.25)(l_pool)
            convs.append(l_pool)

        l_merge = Concatenate(axis=1)(convs)
        l_cov1 = Conv1D(node, 5, activation='relu')(l_merge)
        l_cov1 = Dropout(dropout)(l_cov1)
        l_pool1 = MaxPooling1D(5)(l_cov1)
        l_cov2 = Conv1D(node, 5, activation='relu')(l_pool1)
        l_cov2 = Dropout(dropout)(l_cov2)
        l_pool2 = MaxPooling1D(30)(l_cov2)
        l_flat = Flatten()(l_pool2)
        l_dense = Dense(1024, activation='relu')(l_flat)
        l_dense = Dropout(dropout)(l_dense)
        l_dense = Dense(512, activation='relu')(l_dense)
        l_dense = Dropout(dropout)(l_dense)
        if number_of_classes == 2:
            preds = Dense(1, activation='sigmoid')(l_dense)
        else:
            preds = Dense(number_of_classes, activation='softmax')(l_dense)
        model = Model(sequence_input, preds)
        model_tmp = model
        if number_of_classes == 2:
            model.compile(loss='binary_crossentropy',
                          optimizer=optimizors(random_optimizor, lr),
                          metrics=[
                              'accuracy',
                              km.binary_precision(),
                              km.binary_recall(),
                              km.binary_f1_score(),
                              km.binary_true_positive(),
                              km.binary_true_negative(),
                              km.binary_false_positive(),
                              km.binary_false_negative()
                          ])
        else:
            if sparse_categorical:
                model.compile(loss='sparse_categorical_crossentropy',
                              optimizer=optimizors(random_optimizor, lr),
                              metrics=[
                                  'accuracy',
                                  km.sparse_categorical_precision(),
                                  km.sparse_categorical_recall(),
                                  km.sparse_categorical_f1_score(),
                                  km.sparse_categorical_true_positive(),
                                  km.sparse_categorical_true_negative(),
                                  km.sparse_categorical_false_positive(),
                                  km.sparse_categorical_false_negative()
                              ])
            else:
                model.compile(loss='categorical_crossentropy',
                              optimizer=optimizors(random_optimizor, lr),
                              metrics=[
                                  'accuracy',
                                  km.categorical_precision(),
                                  km.categorical_recall(),
                                  km.categorical_f1_score(),
                                  km.categorical_true_positive(),
                                  km.categorical_true_negative(),
                                  km.categorical_false_positive(),
                                  km.categorical_false_negative()
                              ])
    return model, model_tmp
def Build_Model_RNN_Text(word_index,
                         embedding_index,
                         number_of_classes,
                         MAX_SEQUENCE_LENGTH,
                         EMBEDDING_DIM,
                         sparse_categorical,
                         min_hidden_layer_rnn,
                         max_hidden_layer_rnn,
                         min_nodes_rnn,
                         max_nodes_rnn,
                         random_optimizor,
                         dropout,
                         use_cuda=True,
                         use_bidirectional=True,
                         _l2=0.01,
                         lr=1e-3):
    """
    Build an RNN model for text classification.

    word_index is the word index and embedding_index is the embeddings index;
    see data_helper.py. number_of_classes is the number of classes and
    MAX_SEQUENCE_LENGTH is the maximum length of the text sequences.
    """
    Recurrent = CuDNNGRU if use_cuda else GRU

    model = Sequential()
    values = list(range(min_nodes_rnn, max_nodes_rnn + 1))
    values_layer = list(range(min_hidden_layer_rnn - 1, max_hidden_layer_rnn))
    layer = random.choice(values_layer)
    print(layer)

    embedding_matrix = np.zeros((len(word_index) + 1, EMBEDDING_DIM))
    for word, i in word_index.items():
        embedding_vector = embedding_index.get(word)
        if embedding_vector is not None:
            embedding_matrix[i] = embedding_vector
        else:
            # Words not found in the embedding index get the 'UNK' vector.
            embedding_matrix[i] = embedding_index['UNK']
    model.add(
        Embedding(len(word_index) + 1,
                  EMBEDDING_DIM,
                  weights=[embedding_matrix],
                  input_length=MAX_SEQUENCE_LENGTH,
                  trainable=True))

    gru_node = random.choice(values)
    print(gru_node)
    for i in range(0, layer):
        if use_bidirectional:
            model.add(
                Bidirectional(
                    Recurrent(gru_node,
                              return_sequences=True,
                              kernel_regularizer=l2(_l2))))
        else:
            model.add(
                Recurrent(gru_node,
                          return_sequences=True,
                          kernel_regularizer=l2(_l2)))
        model.add(Dropout(dropout))
    if use_bidirectional:
        model.add(
            Bidirectional(Recurrent(gru_node, kernel_regularizer=l2(_l2))))
    else:
        model.add(Recurrent(gru_node, kernel_regularizer=l2(_l2)))
    model.add(Dropout(dropout))
    model.add(Dense(256, activation='relu', kernel_regularizer=l2(_l2)))

    if number_of_classes == 2:
        model.add(Dense(1, activation='sigmoid', kernel_regularizer=l2(_l2)))
        model_tmp = model
        model.compile(loss='binary_crossentropy',
                      optimizer=optimizors(random_optimizor, lr),
                      metrics=[
                          'accuracy',
                          km.binary_precision(),
                          km.binary_recall(),
                          km.binary_f1_score(),
                          km.binary_true_positive(),
                          km.binary_true_negative(),
                          km.binary_false_positive(),
                          km.binary_false_negative()
                      ])
    else:
        model.add(
            Dense(number_of_classes,
                  activation='softmax',
                  kernel_regularizer=l2(_l2)))
        model_tmp = model
        if sparse_categorical:
            model.compile(loss='sparse_categorical_crossentropy',
                          optimizer=optimizors(random_optimizor, lr),
                          metrics=[
                              'accuracy',
                              km.sparse_categorical_precision(),
                              km.sparse_categorical_recall(),
                              km.sparse_categorical_f1_score(),
                              km.sparse_categorical_true_positive(),
                              km.sparse_categorical_true_negative(),
                              km.sparse_categorical_false_positive(),
                              km.sparse_categorical_false_negative()
                          ])
        else:
            model.compile(loss='categorical_crossentropy',
                          optimizer=optimizors(random_optimizor, lr),
                          metrics=[
                              'accuracy',
                              km.categorical_precision(),
                              km.categorical_recall(),
                              km.categorical_f1_score(),
                              km.categorical_true_positive(),
                              km.categorical_true_negative(),
                              km.categorical_false_positive(),
                              km.categorical_false_negative()
                          ])
    return model, model_tmp
def Build_Model_RNN_Image(shape, number_of_classes, sparse_categorical,
                          min_nodes_rnn, max_nodes_rnn, random_optimizor,
                          dropout):
    """
    Build an RNN model for image classification.

    number_of_classes is the number of classes; shape is (w, h, p).
    """
    values = list(range(min_nodes_rnn - 1, max_nodes_rnn))
    node = random.choice(values)

    x = Input(shape=shape)

    # Encode each row of pixels using the TimeDistributed wrapper.
    # Note: CuDNNLSTM does not accept recurrent_dropout, so the standard
    # LSTM layer is used here to keep the dropout behaviour.
    encoded_rows = TimeDistributed(LSTM(node, recurrent_dropout=dropout))(x)
    node = random.choice(values)
    # Encode the columns of encoded rows.
    encoded_columns = LSTM(node, recurrent_dropout=dropout)(encoded_rows)

    # Final predictions and model.
    # prediction = Dense(256, activation='relu')(encoded_columns)
    if number_of_classes == 2:
        prediction = Dense(1, activation='sigmoid')(encoded_columns)
    else:
        prediction = Dense(number_of_classes,
                           activation='softmax')(encoded_columns)
    model = Model(x, prediction)
    model_tmp = model

    if number_of_classes == 2:
        model.compile(loss='binary_crossentropy',
                      optimizer=optimizors(random_optimizor),
                      metrics=[
                          'accuracy',
                          km.binary_precision(),
                          km.binary_recall(),
                          km.binary_f1_score(),
                          km.binary_true_positive(),
                          km.binary_true_negative(),
                          km.binary_false_positive(),
                          km.binary_false_negative()
                      ])
    else:
        if sparse_categorical:
            model.compile(loss='sparse_categorical_crossentropy',
                          optimizer=optimizors(random_optimizor),
                          metrics=[
                              'accuracy',
                              km.sparse_categorical_precision(),
                              km.sparse_categorical_recall(),
                              km.sparse_categorical_f1_score(),
                              km.sparse_categorical_true_positive(),
                              km.sparse_categorical_true_negative(),
                              km.sparse_categorical_false_positive(),
                              km.sparse_categorical_false_negative()
                          ])
        else:
            model.compile(loss='categorical_crossentropy',
                          optimizer=optimizors(random_optimizor),
                          metrics=[
                              'accuracy',
                              km.categorical_precision(),
                              km.categorical_recall(),
                              km.categorical_f1_score(),
                              km.categorical_true_positive(),
                              km.categorical_true_negative(),
                              km.categorical_false_positive(),
                              km.categorical_false_negative()
                          ])
    return model, model_tmp
def Build_Model_CNN_Image(shape, number_of_classes, sparse_categorical,
                          min_hidden_layer_cnn, max_hidden_layer_cnn,
                          min_nodes_cnn, max_nodes_cnn, random_optimizor,
                          dropout):
    """
    Build a CNN model for image classification.

    number_of_classes is the number of classes; shape is (w, h, p).
    """
    model = Sequential()
    values = list(range(min_nodes_cnn, max_nodes_cnn))
    Layers = list(range(min_hidden_layer_cnn, max_hidden_layer_cnn))
    Layer = random.choice(Layers)
    Filter = random.choice(values)
    model.add(Conv2D(Filter, (3, 3), padding='same', input_shape=shape))
    model.add(Activation('relu'))
    model.add(Conv2D(Filter, (3, 3)))
    model.add(Activation('relu'))

    for i in range(0, Layer):
        Filter = random.choice(values)
        model.add(Conv2D(Filter, (3, 3), padding='same'))
        model.add(Activation('relu'))
        model.add(MaxPooling2D(pool_size=(2, 2)))
        model.add(Dropout(dropout))

    model.add(Flatten())
    model.add(Dense(256, activation='relu'))
    model.add(Dropout(dropout))

    if number_of_classes == 2:
        model.add(Dense(1, activation='sigmoid', kernel_constraint=maxnorm(3)))
        model_tmp = model
        model.compile(loss='binary_crossentropy',
                      optimizer=optimizors(random_optimizor),
                      metrics=[
                          'accuracy',
                          km.binary_precision(),
                          km.binary_recall(),
                          km.binary_f1_score(),
                          km.binary_true_positive(),
                          km.binary_true_negative(),
                          km.binary_false_positive(),
                          km.binary_false_negative()
                      ])
    else:
        model.add(
            Dense(number_of_classes,
                  activation='softmax',
                  kernel_constraint=maxnorm(3)))
        model_tmp = model
        if sparse_categorical:
            model.compile(loss='sparse_categorical_crossentropy',
                          optimizer=optimizors(random_optimizor),
                          metrics=[
                              'accuracy',
                              km.sparse_categorical_precision(),
                              km.sparse_categorical_recall(),
                              km.sparse_categorical_f1_score(),
                              km.sparse_categorical_true_positive(),
                              km.sparse_categorical_true_negative(),
                              km.sparse_categorical_false_positive(),
                              km.sparse_categorical_false_negative()
                          ])
        else:
            model.compile(loss='categorical_crossentropy',
                          optimizer=optimizors(random_optimizor),
                          metrics=[
                              'accuracy',
                              km.categorical_precision(),
                              km.categorical_recall(),
                              km.categorical_f1_score(),
                              km.categorical_true_positive(),
                              km.categorical_true_negative(),
                              km.categorical_false_positive(),
                              km.categorical_false_negative()
                          ])
    return model, model_tmp
def Build_Model_DNN_Text(shape, number_of_classes, sparse_categorical,
                         min_hidden_layer_dnn, max_hidden_layer_dnn,
                         min_nodes_dnn, max_nodes_dnn, random_optimizor,
                         dropout, _l2=0.01, lr=1e-3):
    """
    Build a deep neural network model for text classification.

    shape is the size of the input feature space; number_of_classes is the
    number of classes.
    """
    model = Sequential()
    layer = list(range(min_hidden_layer_dnn, max_hidden_layer_dnn))
    node = list(range(min_nodes_dnn, max_nodes_dnn))
    Numberof_NOde = random.choice(node)
    nLayers = random.choice(layer)
    Numberof_NOde_old = Numberof_NOde

    model.add(
        Dense(Numberof_NOde,
              input_dim=shape,
              activation='relu',
              kernel_regularizer=l2(_l2)))
    model.add(Dropout(dropout))
    for i in range(0, nLayers):
        Numberof_NOde = random.choice(node)
        model.add(
            Dense(Numberof_NOde,
                  input_dim=Numberof_NOde_old,
                  activation='relu',
                  kernel_regularizer=l2(_l2)))
        model.add(Dropout(dropout))
        Numberof_NOde_old = Numberof_NOde

    if number_of_classes == 2:
        model.add(Dense(1, activation='sigmoid', kernel_regularizer=l2(_l2)))
        model_tmp = model
        model.compile(loss='binary_crossentropy',
                      optimizer=optimizors(random_optimizor, lr),
                      metrics=[
                          'accuracy',
                          km.binary_precision(),
                          km.binary_recall(),
                          km.binary_f1_score(),
                          km.binary_true_positive(),
                          km.binary_true_negative(),
                          km.binary_false_positive(),
                          km.binary_false_negative()
                      ])
    else:
        model.add(
            Dense(number_of_classes,
                  activation='softmax',
                  kernel_regularizer=l2(_l2)))
        model_tmp = model
        if sparse_categorical:
            model.compile(loss='sparse_categorical_crossentropy',
                          optimizer=optimizors(random_optimizor, lr),
                          metrics=[
                              'accuracy',
                              km.sparse_categorical_precision(),
                              km.sparse_categorical_recall(),
                              km.sparse_categorical_f1_score(),
                              km.sparse_categorical_true_positive(),
                              km.sparse_categorical_true_negative(),
                              km.sparse_categorical_false_positive(),
                              km.sparse_categorical_false_negative()
                          ])
        else:
            model.compile(loss='categorical_crossentropy',
                          optimizer=optimizors(random_optimizor, lr),
                          metrics=[
                              'accuracy',
                              km.categorical_precision(),
                              km.categorical_recall(),
                              km.categorical_f1_score(),
                              km.categorical_true_positive(),
                              km.categorical_true_negative(),
                              km.categorical_false_positive(),
                              km.categorical_false_negative()
                          ])
    return model, model_tmp
# Add Embedding layer.
# The final sigmoid layer outputs probability values between [0, 1].
model = models.Sequential()
model.add(layers.Embedding(10000, 8, input_length=data.shape[1]))
model.add(layers.Flatten())
model.add(layers.Dense(1, activation='sigmoid'))

# =========================
# Train model
# =========================
# As the model outputs probabilities, binary crossentropy is the best loss
# metric as it measures the distance between probability distributions.
model.compile(optimizer='rmsprop',
              loss='binary_crossentropy',
              metrics=[km.binary_precision(), km.binary_recall()])
history = model.fit(x_train,
                    y_train,
                    epochs=20,
                    batch_size=512,
                    validation_data=(x_val, y_val))

# Prep history dictionary
precision = history.history['precision']
val_precision = history.history['val_precision']
recall = history.history['recall']
val_recall = history.history['val_recall']
epochs = range(1, len(precision) + 1)
BATCH = 64
EPOCHS = 100

# Define the model:
model = GlandsDetector(input_shape=(32, 32, 6), output=2)

# Define the optimizer:
adam = Adam()

# Compile the model:
model.compile(loss='binary_crossentropy',
              optimizer=adam,
              metrics=[
                  'binary_accuracy',
                  km.binary_f1_score(label=1),
                  km.binary_precision(label=1),
                  km.binary_recall(label=1)
              ])

callback_list = [LearningRateScheduler(step_decay)]

# Train:
history = model.fit(X_train,
                    y_train_ohe,
                    batch_size=BATCH,
                    epochs=EPOCHS,
                    validation_data=(X_val, y_val_ohe),
                    class_weight=class_weights,
                    callbacks=callback_list,
                    verbose=2)
                 output_shape)
callback = ModelSaveBestAvgAcc(filepath="model-{epoch:02d}-{avgacc:.2f}.hdf5",
                               verbose=True,
                               cond=filter_val_f1score)
losses = []
for i in range(0, 6):
    losses.append(binary_focal_loss(gamma=2.))

model = get_model(input_shape)
model.compile(optimizer=opt.Adam(lr=1e-4),
              loss=losses,
              metrics=[
                  'accuracy',
                  km.binary_precision(),
                  km.binary_recall(),
                  km.binary_f1_score()
              ])
model.summary()
model.fit_generator(gen_train,
                    steps_per_epoch=len(dataset_train.image_ids) // batch_size,
                    epochs=epochs,
                    validation_data=gen_val,
                    validation_steps=len(dataset_val.image_ids) // batch_size,
                    callbacks=[callback],
                    verbose=2)
print('fine')
def __init__(self, nn_rank_embedding_model, opts):
    self.opts = opts
    self.dense_dims = opts.dense_dims
    self.query_title_model = nn_rank_embedding_model['query-title']
    self.query_abstract_model = nn_rank_embedding_model['query-abstract']
    self.candidate_title_model = nn_rank_embedding_model['candidate-title']
    self.candidate_abstract_model = nn_rank_embedding_model[
        'candidate-abstract']

    model_inputs = [
        self.query_title_model.input, self.candidate_title_model.input,
        self.query_abstract_model.input, self.candidate_abstract_model.input
    ]

    pre_dense_network_output = []
    cos_sim_text = cosine_distance(self.query_title_model.output,
                                   self.candidate_title_model.output,
                                   self.dense_dims, True)
    cos_sim_abstract = cosine_distance(self.query_abstract_model.output,
                                       self.candidate_abstract_model.output,
                                       self.dense_dims, True)
    pre_dense_network_output.append(cos_sim_text)
    pre_dense_network_output.append(cos_sim_abstract)

    for field in ['title', 'abstract']:
        common_type_input = Input(
            name='query-candidate-common-{}'.format(field), shape=(None, ))
        elementwise_sparse = EmbeddingLayer(
            input_dim=self.opts.n_features,
            output_dim=1,
            mask_zero=True,
            name="{}-sparse-embedding".format(field),
            activity_regularizer=l1(self.opts.l1_lambda))(common_type_input)
        pre_dense_network_output.append(
            summation_layer()(elementwise_sparse))
        model_inputs.append(common_type_input)

    citation_count_input = Input(shape=(1, ),
                                 dtype='float32',
                                 name='candidate-citation-count')
    model_inputs.append(citation_count_input)
    pre_dense_network_output.append(citation_count_input)

    similarity_score_input = Input(shape=(1, ),
                                   dtype='float32',
                                   name='similarity-score')
    model_inputs.append(similarity_score_input)
    pre_dense_network_output.append(similarity_score_input)

    input_to_dense_layer = Concatenate()(pre_dense_network_output)
    output_dense_one = Dense(20, name='dense-1',
                             activation='elu')(input_to_dense_layer)
    output_dense_two = Dense(20, name='dense-2',
                             activation='elu')(output_dense_one)
    nn_rank_output = Dense(1,
                           kernel_initializer='one',
                           name='final-output',
                           activation='sigmoid')(output_dense_two)

    self.model = Model(inputs=model_inputs, outputs=nn_rank_output)
    self.model.compile(optimizer='nadam',
                       loss=triplet_loss,
                       metrics=[
                           'accuracy', mean_reciprocal_rank,
                           binary_precision(),
                           binary_recall(), f1_measure
                       ])
    self.callbacks = [
        SaveModelWeights([('nn_rank', self.model)],
                         self.opts.weights_directory,
                         self.opts.checkpoint_frequency)
    ]
def f1_m(y_true, y_pred):
    # keras_metrics factories such as km.binary_precision() return stateful
    # metric objects rather than plain functions of (y_true, y_pred), so
    # precision and recall are computed directly with the Keras backend here.
    true_positives = K.sum(K.round(K.clip(y_true * y_pred, 0, 1)))
    predicted_positives = K.sum(K.round(K.clip(y_pred, 0, 1)))
    possible_positives = K.sum(K.round(K.clip(y_true, 0, 1)))
    precision = true_positives / (predicted_positives + K.epsilon())
    recall = true_positives / (possible_positives + K.epsilon())
    return 2 * ((precision * recall) / (precision + recall + K.epsilon()))
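# Usage sketch: f1_m can be passed alongside the stateful keras_metrics
# objects in a compile() call. The tiny model below is an illustrative
# placeholder, not from the source.
demo = Sequential([Dense(1, activation='sigmoid', input_dim=4)])
demo.compile(optimizer='adam',
             loss='binary_crossentropy',
             metrics=['acc', f1_m, km.binary_precision(), km.binary_recall()])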
             beta_2=0.999,
             epsilon=None,
             decay=0.0,
             amsgrad=False)

import keras_metrics as km


def f1_m(y_true, y_pred):
    # keras_metrics factories return stateful metric objects rather than
    # plain functions of (y_true, y_pred), so precision and recall are
    # computed directly with the Keras backend.
    true_positives = K.sum(K.round(K.clip(y_true * y_pred, 0, 1)))
    predicted_positives = K.sum(K.round(K.clip(y_pred, 0, 1)))
    possible_positives = K.sum(K.round(K.clip(y_true, 0, 1)))
    precision = true_positives / (predicted_positives + K.epsilon())
    recall = true_positives / (possible_positives + K.epsilon())
    return 2 * ((precision * recall) / (precision + recall + K.epsilon()))


model.compile(optimizer=adam,
              loss='binary_crossentropy',
              metrics=['acc', km.binary_precision(), km.binary_recall()])
print('model fitting...')

import numpy as np
import time

# def load_image(x, y):
#     print(x[13])
#     x[13] = np.load('./imagess' + x[13][1:] + '.npy').reshape(1, 64, 2048)
#     x[6] = np.load('./imagess' + x[6][1:] + '.npy').reshape(1, 64, 2048)
#     return x, y

# print('loading X_train images.. ')
# t_train = time.time()
# X_train = tf.data.Dataset.from_tensor_slices((X_train, y_train)).map(load_image)
# print(time.time() - t_train, 'loading X_test images..')
# t_test = time.time()
def __init__(self, model_file=None, weights_file=None):
    self.model_file = model_file
    self.weights_file = weights_file
    self.l = 500   # input sequence length
    self.k = 13    # embedding dimension
    self.m = 9     # convolution window height
    self.model = None

    # See if there is a given model and weights file
    if self.model_file is not None:
        with open(self.model_file, 'r') as json_file:
            loaded_model_json = json_file.read()
        self.model = model_from_json(loaded_model_json)
        if self.weights_file is not None:
            self.model.load_weights(self.weights_file)
    else:
        # Note: xmax (the vocabulary size for the Embedding layer) must be
        # defined at module scope; it is not set in this constructor.
        self.model = Sequential()
        self.model.add(Embedding(xmax, self.k, input_length=self.l))
        self.model.add(Lambda(lambda x: expand_dims(x, 3)))
        self.model.add(Conv2D(512, (self.m, self.k), activation="relu"))
        self.model.add(MaxPooling2D(pool_size=(self.l - self.m + 1, 1)))
        self.model.add(Flatten())
        self.model.add(Dropout(0.5))
        self.model.add(Dense(64, activation="relu"))
        self.model.add(Dense(16, activation="relu"))
        self.model.add(Dense(2, activation="softmax"))

    # Print out the model and compile it
    self.model.summary()
    self.model.compile(optimizer="adam",
                       loss="categorical_crossentropy",
                       metrics=[
                           keras_metrics.binary_precision(),
                           keras_metrics.binary_recall()
                       ])
def Build_Model_DNN_Image(shape, number_of_classes, sparse_categorical,
                          min_hidden_layer_dnn, max_hidden_layer_dnn,
                          min_nodes_dnn, max_nodes_dnn, random_optimizor,
                          dropout):
    """
    Build a deep neural network model for image classification.

    shape is the input feature space; number_of_classes is the number of
    classes.
    """
    model = Sequential()
    values = list(range(min_nodes_dnn, max_nodes_dnn))
    Numberof_NOde = random.choice(values)
    Lvalues = list(range(min_hidden_layer_dnn, max_hidden_layer_dnn))
    nLayers = random.choice(Lvalues)

    print(shape)
    model.add(Flatten(input_shape=shape))
    model.add(Dense(Numberof_NOde, activation='relu'))
    model.add(Dropout(dropout))
    for i in range(0, nLayers - 1):
        Numberof_NOde = random.choice(values)
        model.add(Dense(Numberof_NOde, activation='relu'))
        model.add(Dropout(dropout))

    if number_of_classes == 2:
        model.add(Dense(1, activation='sigmoid'))
        model_tmp = model
        model.compile(loss='binary_crossentropy',
                      optimizer=optimizors(random_optimizor),
                      metrics=[
                          'accuracy',
                          km.binary_precision(),
                          km.binary_recall(),
                          km.binary_f1_score(),
                          km.binary_true_positive(),
                          km.binary_true_negative(),
                          km.binary_false_positive(),
                          km.binary_false_negative()
                      ])
    else:
        model.add(Dense(number_of_classes, activation='softmax'))
        model_tmp = model
        if sparse_categorical:
            model.compile(loss='sparse_categorical_crossentropy',
                          optimizer=optimizors(random_optimizor),
                          metrics=[
                              'accuracy',
                              km.sparse_categorical_precision(),
                              km.sparse_categorical_recall(),
                              km.sparse_categorical_f1_score(),
                              km.sparse_categorical_true_positive(),
                              km.sparse_categorical_true_negative(),
                              km.sparse_categorical_false_positive(),
                              km.sparse_categorical_false_negative()
                          ])
        else:
            model.compile(loss='categorical_crossentropy',
                          optimizer=optimizors(random_optimizor),
                          metrics=[
                              'accuracy',
                              km.categorical_precision(),
                              km.categorical_recall(),
                              km.categorical_f1_score(),
                              km.categorical_true_positive(),
                              km.categorical_true_negative(),
                              km.categorical_false_positive(),
                              km.categorical_false_negative()
                          ])
    return model, model_tmp
    finetune_model = Model(inputs=base_model.input, outputs=predictions)
    return finetune_model


FC_LAYERS = [200, 50]
dropout = 0.5

finetune_model = build_finetune_model(base_model,
                                      dropout=dropout,
                                      fc_layers=FC_LAYERS,
                                      num_classes=len(class_list))

recall = km.binary_recall(label=1)
precision = km.binary_precision(label=1)
finetune_model.compile(loss='binary_crossentropy',
                       optimizer='adam',
                       metrics=[
                           'accuracy', auc_roc,
                           km.binary_precision(label=1),
                           km.binary_recall(label=1)
                       ])

print('fitting model')
class_weights = {0: 1., 1: 1.}
STEP_SIZE_TRAIN = train_gen.n // train_gen.batch_size
STEP_SIZE_VALID = val_gen.n // val_gen.batch_size
es = EarlyStopping(monitor='val_loss',
# Result grids: 3 epoch settings x 4 batch-size settings
ba = np.zeros((3, 4))
pr = np.zeros((3, 4))
rec = np.zeros((3, 4))
tp = np.zeros((3, 4))
fp = np.zeros((3, 4))
tn = np.zeros((3, 4))
fn = np.zeros((3, 4))

for i in range(3):
    for j in range(4):
        epochs, batch_size, n_neurons, dropout = (epochs_arr[i],
                                                  batch_size_arr[j], 1000, 0)
        n_timesteps, n_features, n_outputs = (trainX.shape[1],
                                              trainX.shape[2],
                                              trainy.shape[1])
        model = Sequential()
        model.add(LSTM(n_neurons, input_shape=(n_timesteps, n_features)))
        model.add(Dropout(dropout))
        model.add(Dense(n_neurons, activation='relu'))
        model.add(Dense(n_outputs, activation='sigmoid'))
        model.compile(loss='binary_crossentropy',
                      optimizer='adam',
                      metrics=[
                          'binary_accuracy',
                          km.binary_precision(),
                          km.binary_recall(),
                          km.binary_true_positive(),
                          km.binary_false_positive(),
                          km.binary_true_negative(),
                          km.binary_false_negative()
                      ])
        # Fit network
        model.fit(trainX, trainy, epochs=epochs, batch_size=batch_size,
                  verbose=1)
        # Evaluate model: evaluate() returns the loss followed by the seven
        # metrics in the order they were compiled.
        (_, ba[i, j], pr[i, j], rec[i, j], tp[i, j], fp[i, j], tn[i, j],
         fn[i, j]) = model.evaluate(testX, testy, batch_size=batch_size,
                                    verbose=0)
        # print(ba, pr, rec, tp, fp, tn, fn)
        # print(hist.history)

np.savetxt('/home/mdzadik/CAN_data/pickles/ba_' + str(n_neurons) + '_' +
           str(dropout) + '.csv', ba, delimiter=",")
np.savetxt('/home/mdzadik/CAN_data/pickles/pr_' + str(n_neurons) + '_' +
           str(dropout) + '.csv', pr, delimiter=",")
np.savetxt('/home/mdzadik/CAN_data/pickles/rec_' + str(n_neurons) + '_' +
           str(dropout) + '.csv', rec, delimiter=",")
np.savetxt('/home/mdzadik/CAN_data/pickles/tp_' + str(n_neurons) + '_' +
           str(dropout) + '.csv', tp, delimiter=",")
np.savetxt('/home/mdzadik/CAN_data/pickles/fp_' + str(n_neurons) + '_' +
           str(dropout) + '.csv', fp, delimiter=",")
np.savetxt('/home/mdzadik/CAN_data/pickles/tn_' + str(n_neurons) + '_' +
           str(dropout) + '.csv', tn, delimiter=",")
np.savetxt('/home/mdzadik/CAN_data/pickles/fn_' + str(n_neurons) + '_' +
           str(dropout) + '.csv', fn, delimiter=",")
def get_predictions(text):
    sequence = tokenizer.texts_to_sequences([text])
    sequence = pad_sequences(sequence, maxlen=SEQUENCE_LENGTH)
    model = load_model('Spam-model-compiled.h5',
                       custom_objects={
                           'binary_precision': keras_metrics.binary_precision(),
                           'binary_recall': keras_metrics.binary_recall()
                       })
    prediction = model.predict(sequence)[0]
    return int2label[np.argmax(prediction)]
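# Usage sketch for get_predictions. The message text is an illustrative
# example; tokenizer, SEQUENCE_LENGTH, and int2label must already be defined
# as assumed by the function above. Note that the model is reloaded on every
# call, which is slow; hoisting load_model() out of the function would avoid
# that.
label = get_predictions("Congratulations! You won a free prize, reply now")
print(label)  # e.g. 'spam' or 'ham', depending on int2label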