def generate_model(params):
    inputs = Input(shape=(PARAMS['sequence_len'],), dtype='int32')
    input_layer = Embedding(input_dim=params['vocab_size'],
                            output_dim=params['embedding_dim'],
                            input_length=PARAMS['sequence_len'],
                            weights=[params['embedding_matrix']],
                            trainable=False)(inputs)
    input_layer = SpatialDropout1D(0.2)(input_layer)

    i1 = Bidirectional(CuDNNLSTM(params['embedding_dim'] * 2, return_sequences=True))(input_layer)
    i1 = SeqSelfAttention()(i1)
    i1 = Concatenate(axis=1)([GlobalAveragePooling1D()(i1), GlobalMaxPooling1D()(i1)])

    i2 = Bidirectional(CuDNNGRU(params['embedding_dim'], return_sequences=True))(input_layer)
    i2 = SeqSelfAttention()(i2)
    i2 = Concatenate(axis=1)([GlobalAveragePooling1D()(i2), GlobalMaxPooling1D()(i2)])

    concatenated_tensor = Concatenate(axis=1)([i1, i2])
    concatenated_tensor = Dense(params['num_classes'] * 2, activation='relu')(concatenated_tensor)
    concatenated_tensor = BatchNormalization()(concatenated_tensor)
    concatenated_tensor = Dropout(0.1)(concatenated_tensor)
    output = Dense(params['num_classes'], activation="softmax")(concatenated_tensor)

    model = Model(inputs=inputs, outputs=output)
    opt = Adam()
    model.compile(optimizer=opt, loss=params['loss'],
                  metrics=['accuracy', categorical_accuracy,
                           keras_metrics.categorical_recall(),
                           util.balanced_recall, util.f1])
    model.summary()
    return model, params

def hybridModel(embeddingMatrix, maxDataLenght, embeddingVectorLength, numAttributes, numNeurons):
    model = Sequential()
    model.add(Embedding(input_dim=numAttributes,
                        output_dim=embeddingVectorLength,
                        weights=[embeddingMatrix],
                        input_length=maxDataLenght,
                        trainable=False))
    model.add(Dropout(0.2))
    model.add(Conv1D(64, 5, activation='relu'))
    model.add(MaxPooling1D(pool_size=4))
    model.add(Bidirectional(LSTM(numNeurons, return_sequences=False), merge_mode="sum"))
    model.add(Dense(2, activation='softmax', kernel_regularizer=regularizers.l2(0.001)))
    model.compile(loss='categorical_crossentropy', optimizer='adam',
                  metrics=['accuracy',
                           km.categorical_f1_score(),
                           km.categorical_precision(),
                           km.categorical_recall()])
    return model

def get_custom_metrics():
    custom_metrics = []
    custom_metrics.append(keras_metrics.categorical_f1_score())
    custom_metrics.append(keras_metrics.categorical_precision())
    custom_metrics.append(keras_metrics.categorical_recall())
    custom_metrics = {m.__name__: m for m in custom_metrics}
    custom_metrics["sin"] = K.sin
    custom_metrics["abs"] = K.abs
    return custom_metrics

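# A hedged usage sketch (not part of the original snippet): the dictionary returned by
# get_custom_metrics() maps names to callables, which is the format Keras expects in
# `custom_objects` when deserializing a model that was compiled with these metrics.
# The file name "model.h5" and the x_test / y_test arrays are placeholders.
import keras

restored = keras.models.load_model("model.h5", custom_objects=get_custom_metrics())
restored.evaluate(x_test, y_test)
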
def test_average_recall(self):
    model = keras.models.Sequential()
    model.add(keras.layers.Activation(keras.backend.sin))
    model.add(keras.layers.Activation(keras.backend.abs))
    model.add(keras.layers.Softmax())
    model.compile(optimizer="sgd",
                  loss="categorical_crossentropy",
                  metrics=[
                      km.categorical_recall(label=0),
                      km.categorical_recall(label=1),
                      km.categorical_recall(label=2),
                      km.categorical_average_recall(labels=3),
                  ])

    x, y = self.create_samples(10000, labels=3)
    model.fit(x, y, epochs=10, batch_size=100)

    metrics = model.evaluate(x, y, batch_size=100)[1:]
    r0, r1, r2 = metrics[0:3]
    average_recall = metrics[3]
    expected_recall = (r0 + r1 + r2) / 3.0
    self.assertAlmostEqual(expected_recall, average_recall, places=3)

def load_model(model_path):
    precision = km.categorical_precision()
    recall = km.categorical_recall()
    f1_score = km.categorical_f1_score()
    model = keras.models.load_model(model_path,
                                    custom_objects={
                                        'AdaBound': AdaBound,
                                        'categorical_precision': precision,
                                        'categorical_recall': recall,
                                        'categorical_f1_score': f1_score
                                    })
    decay = LR_FINAL / EPOCHS
    optm = AdaBound(lr=0.001, final_lr=LR_FINAL, gamma=1e-03,
                    weight_decay=decay, amsbound=False)
    return model

def generate_model(params):
    inputs = Input(shape=(PARAMS['sequence_len'],), dtype='int32')
    input_layer = Embedding(input_dim=params['vocab_size'],
                            output_dim=params['embedding_dim'],
                            input_length=PARAMS['sequence_len'],
                            weights=[params['embedding_matrix']],
                            trainable=False)(inputs)

    i1 = Conv1D(params['embedding_dim'] * 2, 2, activation='relu')(input_layer)
    i1 = Concatenate(axis=1)([SeqWeightedAttention()(i1),
                              GlobalMaxPooling1D()(i1),
                              GlobalAveragePooling1D()(i1)])

    i2 = Conv1D(params['embedding_dim'] * 2, 3, activation='relu')(input_layer)
    i2 = Concatenate(axis=1)([SeqWeightedAttention()(i2),
                              GlobalMaxPooling1D()(i2),
                              GlobalAveragePooling1D()(i2)])

    i3 = Conv1D(params['embedding_dim'] * 2, 4, activation='relu')(input_layer)
    i3 = Concatenate(axis=1)([SeqWeightedAttention()(i3),
                              GlobalMaxPooling1D()(i3),
                              GlobalAveragePooling1D()(i3)])

    concatenated_tensor = Concatenate(axis=1)([i1, i2, i3])
    # flatten = Flatten()(concatenated_tensor)
    concatenated_tensor = Dropout(0.1)(concatenated_tensor)
    output = Dense(params['num_classes'], activation="softmax")(concatenated_tensor)

    model = Model(inputs=inputs, outputs=output)
    opt = Adam()
    model.compile(optimizer=opt, loss=params['loss'],
                  metrics=['accuracy', categorical_accuracy,
                           keras_metrics.categorical_recall(),
                           util.balanced_recall])
    model.summary()
    return model, params

def get_metrics_fresh(metrics, nr_classes):
    """
    Takes a list of metrics and creates fresh metric tensors accordingly.
    This is necessary after re-compiling the model, because the metric
    placeholders have to be re-created.
    """
    f1 = any(["f1_score" in str(a) for a in metrics])
    precision = any(["precision" in str(a) for a in metrics])
    recall = any(["recall" in str(a) for a in metrics])
    Metrics = []
    if f1:
        for class_ in range(nr_classes):
            Metrics.append(keras_metrics.categorical_f1_score(label=class_))
    if precision:
        for class_ in range(nr_classes):
            Metrics.append(keras_metrics.categorical_precision(label=class_))
    if recall:
        for class_ in range(nr_classes):
            Metrics.append(keras_metrics.categorical_recall(label=class_))
    metrics = ['accuracy'] + Metrics
    return metrics

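# A hedged usage sketch (not from the original source): when a model is re-compiled,
# the old per-class metric list is used only to decide which metric families to rebuild,
# and fresh metric objects are created. `model` and `previous_metrics` are placeholders.
model.compile(optimizer='adam',
              loss='categorical_crossentropy',
              metrics=get_metrics_fresh(previous_metrics, nr_classes=3))
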
def get_model():
    input_tensor = Input(shape=(IMAGE_SIZE[0], IMAGE_SIZE[1], 3))
    base_model = Xception(input_shape=(IMAGE_SIZE[0], IMAGE_SIZE[1], 3),
                          include_top=False,
                          weights=None,
                          input_tensor=input_tensor,
                          pooling='avg',
                          classes=N_CLASSES)
    x = base_model.output
    predictions = Dense(N_CLASSES, activation='softmax')(x)
    model = keras.models.Model(inputs=base_model.input, outputs=predictions)

    decay = LR_FINAL / EPOCHS
    optm = AdaBound(lr=0.01, final_lr=LR_FINAL, gamma=1e-03,
                    weight_decay=decay, amsbound=False)

    precision = km.categorical_precision()
    recall = km.categorical_recall()
    f1_score = km.categorical_f1_score()
    model.compile(optimizer=optm, loss='categorical_crossentropy',
                  metrics=['accuracy', precision, recall, f1_score])
    return model

def Build_Model_CNN_Text(word_index, embedding_index, number_of_classes,
                         MAX_SEQUENCE_LENGTH, EMBEDDING_DIM, sparse_categorical,
                         min_hidden_layer_cnn, max_hidden_layer_cnn,
                         min_nodes_cnn, max_nodes_cnn, random_optimizor,
                         dropout, simple_model=False, _l2=0.01, lr=1e-3):
    """
    def buildModel_CNN(word_index, embedding_index, number_of_classes,
                       MAX_SEQUENCE_LENGTH, EMBEDDING_DIM, Complexity=0):

    word_index is the word index, embedding_index is the embeddings index
    (see data_helper.py),
    number_of_classes is the number of classes,
    MAX_SEQUENCE_LENGTH is the maximum length of the text sequences,
    EMBEDDING_DIM is an int value for the dimension of the word embeddings
    (see data_helper.py).
    Complexity selects between two different CNN models:
    Complexity=0 is a simple CNN with [1, 5] hidden layers,
    Complexity=2 is a more complex CNN with filter_length in the range [1, 10].
    """
    model = Sequential()
    if simple_model:
        embedding_matrix = np.zeros((len(word_index) + 1, EMBEDDING_DIM))
        for word, i in word_index.items():
            embedding_vector = embedding_index.get(word)
            if embedding_vector is not None:
                # Words not found in the embedding index will be all-zeros.
                embedding_matrix[i] = embedding_vector
            else:
                embedding_matrix[i] = embedding_index['UNK']
        model.add(Embedding(len(word_index) + 1,
                            EMBEDDING_DIM,
                            weights=[embedding_matrix],
                            input_length=MAX_SEQUENCE_LENGTH,
                            trainable=True))
        values = list(range(min_nodes_cnn, max_nodes_cnn))
        Layer = list(range(min_hidden_layer_cnn, max_hidden_layer_cnn))
        Layer = random.choice(Layer)
        for i in range(0, Layer):
            Filter = random.choice(values)
            model.add(Conv1D(Filter, 5, activation='relu', kernel_regularizer=l2(_l2)))
            model.add(Dropout(dropout))
            model.add(MaxPooling1D(5))
        model.add(Flatten())
        Filter = random.choice(values)
        model.add(Dense(Filter, activation='relu', kernel_regularizer=l2(_l2)))
        model.add(Dropout(dropout))
        Filter = random.choice(values)
        model.add(Dense(Filter, activation='relu', kernel_regularizer=l2(_l2)))
        model.add(Dropout(dropout))

        if number_of_classes == 2:
            model.add(Dense(1, activation='sigmoid', kernel_regularizer=l2(_l2)))
            model_tmp = model
            model.compile(loss='binary_crossentropy',
                          optimizer=optimizors(random_optimizor, lr),
                          metrics=['accuracy',
                                   km.binary_precision(), km.binary_recall(),
                                   km.binary_f1_score(),
                                   km.binary_true_positive(), km.binary_true_negative(),
                                   km.binary_false_positive(), km.binary_false_negative()])
        else:
            model.add(Dense(number_of_classes, activation='softmax',
                            kernel_regularizer=l2(_l2)))
            model_tmp = model
            if sparse_categorical:
                model.compile(loss='sparse_categorical_crossentropy',
                              optimizer=optimizors(random_optimizor, lr),
                              metrics=['accuracy',
                                       km.sparse_categorical_precision(),
                                       km.sparse_categorical_recall(),
                                       km.sparse_categorical_f1_score(),
                                       km.sparse_categorical_true_positive(),
                                       km.sparse_categorical_true_negative(),
                                       km.sparse_categorical_false_positive(),
                                       km.sparse_categorical_false_negative()])
            else:
                model.compile(loss='categorical_crossentropy',
                              optimizer=optimizors(random_optimizor, lr),
                              metrics=['accuracy',
                                       km.categorical_precision(), km.categorical_recall(),
                                       km.categorical_f1_score(),
                                       km.categorical_true_positive(), km.categorical_true_negative(),
                                       km.categorical_false_positive(), km.categorical_false_negative()])
    else:
        embedding_matrix = np.zeros((len(word_index) + 1, EMBEDDING_DIM))
        for word, i in word_index.items():
            embedding_vector = embedding_index.get(word)
            if embedding_vector is not None:
                # Words not found in the embedding index will be all-zeros.
                embedding_matrix[i] = embedding_vector
            else:
                embedding_matrix[i] = embedding_index['UNK']
        embedding_layer = Embedding(len(word_index) + 1,
                                    EMBEDDING_DIM,
                                    weights=[embedding_matrix],
                                    input_length=MAX_SEQUENCE_LENGTH,
                                    trainable=True)

        # Applying a more complex convolutional approach.
        convs = []
        values_layer = list(range(min_hidden_layer_cnn, max_hidden_layer_cnn))
        filter_sizes = []
        layer = random.choice(values_layer)
        print("Filter ", layer)
        for fl in range(0, layer):
            filter_sizes.append((fl + 2))

        values_node = list(range(min_nodes_cnn, max_nodes_cnn))
        node = random.choice(values_node)
        print("Node ", node)

        sequence_input = Input(shape=(MAX_SEQUENCE_LENGTH,), dtype='int32')
        embedded_sequences = embedding_layer(sequence_input)

        for fsz in filter_sizes:
            l_conv = Conv1D(node, kernel_size=fsz, activation='relu')(embedded_sequences)
            l_pool = MaxPooling1D(5)(l_conv)
            # l_pool = Dropout(0.25)(l_pool)
            convs.append(l_pool)

        l_merge = Concatenate(axis=1)(convs)
        l_cov1 = Conv1D(node, 5, activation='relu')(l_merge)
        l_cov1 = Dropout(dropout)(l_cov1)
        l_pool1 = MaxPooling1D(5)(l_cov1)
        l_cov2 = Conv1D(node, 5, activation='relu')(l_pool1)
        l_cov2 = Dropout(dropout)(l_cov2)
        l_pool2 = MaxPooling1D(30)(l_cov2)
        l_flat = Flatten()(l_pool2)
        l_dense = Dense(1024, activation='relu')(l_flat)
        l_dense = Dropout(dropout)(l_dense)
        l_dense = Dense(512, activation='relu')(l_dense)
        l_dense = Dropout(dropout)(l_dense)

        if number_of_classes == 2:
            preds = Dense(1, activation='sigmoid')(l_dense)
        else:
            preds = Dense(number_of_classes, activation='softmax')(l_dense)
        model = Model(sequence_input, preds)
        model_tmp = model

        if number_of_classes == 2:
            model.compile(loss='binary_crossentropy',
                          optimizer=optimizors(random_optimizor, lr),
                          metrics=['accuracy',
                                   km.binary_precision(), km.binary_recall(),
                                   km.binary_f1_score(),
                                   km.binary_true_positive(), km.binary_true_negative(),
                                   km.binary_false_positive(), km.binary_false_negative()])
        else:
            if sparse_categorical:
                model.compile(loss='sparse_categorical_crossentropy',
                              optimizer=optimizors(random_optimizor, lr),
                              metrics=['accuracy',
                                       km.sparse_categorical_precision(),
                                       km.sparse_categorical_recall(),
                                       km.sparse_categorical_f1_score(),
                                       km.sparse_categorical_true_positive(),
                                       km.sparse_categorical_true_negative(),
                                       km.sparse_categorical_false_positive(),
                                       km.sparse_categorical_false_negative()])
            else:
                model.compile(loss='categorical_crossentropy',
                              optimizer=optimizors(random_optimizor, lr),
                              metrics=['accuracy',
                                       km.categorical_precision(), km.categorical_recall(),
                                       km.categorical_f1_score(),
                                       km.categorical_true_positive(), km.categorical_true_negative(),
                                       km.categorical_false_positive(), km.categorical_false_negative()])
    return model, model_tmp

def Build_Model_RNN_Text(word_index, embedding_index, number_of_classes,
                         MAX_SEQUENCE_LENGTH, EMBEDDING_DIM, sparse_categorical,
                         min_hidden_layer_rnn, max_hidden_layer_rnn,
                         min_nodes_rnn, max_nodes_rnn, random_optimizor,
                         dropout, use_cuda=True, use_bidirectional=True,
                         _l2=0.01, lr=1e-3):
    """
    def buildModel_RNN(word_index, embedding_index, number_of_classes,
                       MAX_SEQUENCE_LENGTH, EMBEDDING_DIM, sparse_categorical):

    word_index is the word index, embedding_index is the embeddings index
    (see data_helper.py),
    number_of_classes is the number of classes,
    MAX_SEQUENCE_LENGTH is the maximum length of the text sequences.
    """
    Recurrent = CuDNNGRU if use_cuda else GRU

    model = Sequential()
    values = list(range(min_nodes_rnn, max_nodes_rnn + 1))
    values_layer = list(range(min_hidden_layer_rnn - 1, max_hidden_layer_rnn))
    layer = random.choice(values_layer)
    print(layer)

    embedding_matrix = np.zeros((len(word_index) + 1, EMBEDDING_DIM))
    for word, i in word_index.items():
        embedding_vector = embedding_index.get(word)
        if embedding_vector is not None:
            # Words not found in the embedding index will be all-zeros.
            embedding_matrix[i] = embedding_vector
        else:
            embedding_matrix[i] = embedding_index['UNK']
    model.add(Embedding(len(word_index) + 1,
                        EMBEDDING_DIM,
                        weights=[embedding_matrix],
                        input_length=MAX_SEQUENCE_LENGTH,
                        trainable=True))

    gru_node = random.choice(values)
    print(gru_node)
    for i in range(0, layer):
        if use_bidirectional:
            model.add(Bidirectional(Recurrent(gru_node, return_sequences=True,
                                              kernel_regularizer=l2(_l2))))
        else:
            model.add(Recurrent(gru_node, return_sequences=True,
                                kernel_regularizer=l2(_l2)))
        model.add(Dropout(dropout))
    if use_bidirectional:
        model.add(Bidirectional(Recurrent(gru_node, kernel_regularizer=l2(_l2))))
    else:
        model.add(Recurrent(gru_node, kernel_regularizer=l2(_l2)))
    model.add(Dropout(dropout))
    model.add(Dense(256, activation='relu', kernel_regularizer=l2(_l2)))

    if number_of_classes == 2:
        model.add(Dense(1, activation='sigmoid', kernel_regularizer=l2(_l2)))
        model_tmp = model
        model.compile(loss='binary_crossentropy',
                      optimizer=optimizors(random_optimizor, lr),
                      metrics=['accuracy',
                               km.binary_precision(), km.binary_recall(),
                               km.binary_f1_score(),
                               km.binary_true_positive(), km.binary_true_negative(),
                               km.binary_false_positive(), km.binary_false_negative()])
    else:
        model.add(Dense(number_of_classes, activation='softmax',
                        kernel_regularizer=l2(_l2)))
        model_tmp = model
        if sparse_categorical:
            model.compile(loss='sparse_categorical_crossentropy',
                          optimizer=optimizors(random_optimizor, lr),
                          metrics=['accuracy',
                                   km.sparse_categorical_precision(),
                                   km.sparse_categorical_recall(),
                                   km.sparse_categorical_f1_score(),
                                   km.sparse_categorical_true_positive(),
                                   km.sparse_categorical_true_negative(),
                                   km.sparse_categorical_false_positive(),
                                   km.sparse_categorical_false_negative()])
        else:
            model.compile(loss='categorical_crossentropy',
                          optimizer=optimizors(random_optimizor, lr),
                          metrics=['accuracy',
                                   km.categorical_precision(), km.categorical_recall(),
                                   km.categorical_f1_score(),
                                   km.categorical_true_positive(), km.categorical_true_negative(),
                                   km.categorical_false_positive(), km.categorical_false_negative()])
    return model, model_tmp

def Build_Model_RNN_Image(shape, number_of_classes, sparse_categorical,
                          min_nodes_rnn, max_nodes_rnn, random_optimizor, dropout):
    """
    def Image_model_RNN(num_classes, shape):
    num_classes is the number of classes, shape is (w, h, p)
    """
    values = list(range(min_nodes_rnn - 1, max_nodes_rnn))
    node = random.choice(values)

    x = Input(shape=shape)

    # Encodes a row of pixels using the TimeDistributed wrapper.
    # Note: CuDNNLSTM does not accept recurrent_dropout, so that argument is not passed here.
    encoded_rows = TimeDistributed(CuDNNLSTM(node))(x)
    node = random.choice(values)
    # Encodes columns of encoded rows.
    encoded_columns = CuDNNLSTM(node)(encoded_rows)

    # Final predictions and model.
    # prediction = Dense(256, activation='relu')(encoded_columns)
    if number_of_classes == 2:
        prediction = Dense(1, activation='sigmoid')(encoded_columns)
    else:
        prediction = Dense(number_of_classes, activation='softmax')(encoded_columns)
    model = Model(x, prediction)
    model_tmp = model

    if number_of_classes == 2:
        model.compile(loss='binary_crossentropy',
                      optimizer=optimizors(random_optimizor),
                      metrics=['accuracy',
                               km.binary_precision(), km.binary_recall(),
                               km.binary_f1_score(),
                               km.binary_true_positive(), km.binary_true_negative(),
                               km.binary_false_positive(), km.binary_false_negative()])
    else:
        if sparse_categorical:
            model.compile(loss='sparse_categorical_crossentropy',
                          optimizer=optimizors(random_optimizor),
                          metrics=['accuracy',
                                   km.sparse_categorical_precision(),
                                   km.sparse_categorical_recall(),
                                   km.sparse_categorical_f1_score(),
                                   km.sparse_categorical_true_positive(),
                                   km.sparse_categorical_true_negative(),
                                   km.sparse_categorical_false_positive(),
                                   km.sparse_categorical_false_negative()])
        else:
            model.compile(loss='categorical_crossentropy',
                          optimizer=optimizors(random_optimizor),
                          metrics=['accuracy',
                                   km.categorical_precision(), km.categorical_recall(),
                                   km.categorical_f1_score(),
                                   km.categorical_true_positive(), km.categorical_true_negative(),
                                   km.categorical_false_positive(), km.categorical_false_negative()])
    return model, model_tmp

def Build_Model_CNN_Image(shape, number_of_classes, sparse_categorical,
                          min_hidden_layer_cnn, max_hidden_layer_cnn,
                          min_nodes_cnn, max_nodes_cnn, random_optimizor, dropout):
    """
    def Image_model_CNN(num_classes, shape):
    num_classes is the number of classes, shape is (w, h, p)
    """
    model = Sequential()
    values = list(range(min_nodes_cnn, max_nodes_cnn))
    Layers = list(range(min_hidden_layer_cnn, max_hidden_layer_cnn))
    Layer = random.choice(Layers)
    Filter = random.choice(values)

    model.add(Conv2D(Filter, (3, 3), padding='same', input_shape=shape))
    model.add(Activation('relu'))
    model.add(Conv2D(Filter, (3, 3)))
    model.add(Activation('relu'))

    for i in range(0, Layer):
        Filter = random.choice(values)
        model.add(Conv2D(Filter, (3, 3), padding='same'))
        model.add(Activation('relu'))
        model.add(MaxPooling2D(pool_size=(2, 2)))
        model.add(Dropout(dropout))

    model.add(Flatten())
    model.add(Dense(256, activation='relu'))
    model.add(Dropout(dropout))

    if number_of_classes == 2:
        model.add(Dense(1, activation='sigmoid', kernel_constraint=maxnorm(3)))
        model_tmp = model
        model.compile(loss='binary_crossentropy',
                      optimizer=optimizors(random_optimizor),
                      metrics=['accuracy',
                               km.binary_precision(), km.binary_recall(),
                               km.binary_f1_score(),
                               km.binary_true_positive(), km.binary_true_negative(),
                               km.binary_false_positive(), km.binary_false_negative()])
    else:
        model.add(Dense(number_of_classes, activation='softmax',
                        kernel_constraint=maxnorm(3)))
        model_tmp = model
        if sparse_categorical:
            model.compile(loss='sparse_categorical_crossentropy',
                          optimizer=optimizors(random_optimizor),
                          metrics=['accuracy',
                                   km.sparse_categorical_precision(),
                                   km.sparse_categorical_recall(),
                                   km.sparse_categorical_f1_score(),
                                   km.sparse_categorical_true_positive(),
                                   km.sparse_categorical_true_negative(),
                                   km.sparse_categorical_false_positive(),
                                   km.sparse_categorical_false_negative()])
        else:
            model.compile(loss='categorical_crossentropy',
                          optimizer=optimizors(random_optimizor),
                          metrics=['accuracy',
                                   km.categorical_precision(), km.categorical_recall(),
                                   km.categorical_f1_score(),
                                   km.categorical_true_positive(), km.categorical_true_negative(),
                                   km.categorical_false_positive(), km.categorical_false_negative()])
    return model, model_tmp

def Build_Model_DNN_Text(shape, number_of_classes, sparse_categorical,
                         min_hidden_layer_dnn, max_hidden_layer_dnn,
                         min_nodes_dnn, max_nodes_dnn, random_optimizor,
                         dropout, _l2=0.01, lr=1e-3):
    """
    buildModel_DNN_Tex(shape, number_of_classes, sparse_categorical)
    Build a deep neural network model for text classification.
    shape is the input feature space,
    number_of_classes is the number of classes.
    """
    model = Sequential()
    layer = list(range(min_hidden_layer_dnn, max_hidden_layer_dnn))
    node = list(range(min_nodes_dnn, max_nodes_dnn))

    Numberof_NOde = random.choice(node)
    nLayers = random.choice(layer)
    Numberof_NOde_old = Numberof_NOde

    model.add(Dense(Numberof_NOde, input_dim=shape, activation='relu',
                    kernel_regularizer=l2(_l2)))
    model.add(Dropout(dropout))
    for i in range(0, nLayers):
        Numberof_NOde = random.choice(node)
        model.add(Dense(Numberof_NOde, input_dim=Numberof_NOde_old,
                        activation='relu', kernel_regularizer=l2(_l2)))
        model.add(Dropout(dropout))
        Numberof_NOde_old = Numberof_NOde

    if number_of_classes == 2:
        model.add(Dense(1, activation='sigmoid', kernel_regularizer=l2(_l2)))
        model_tmp = model
        model.compile(loss='binary_crossentropy',
                      optimizer=optimizors(random_optimizor, lr),
                      metrics=['accuracy',
                               km.binary_precision(), km.binary_recall(),
                               km.binary_f1_score(),
                               km.binary_true_positive(), km.binary_true_negative(),
                               km.binary_false_positive(), km.binary_false_negative()])
    else:
        model.add(Dense(number_of_classes, activation='softmax',
                        kernel_regularizer=l2(_l2)))
        model_tmp = model
        if sparse_categorical:
            model.compile(loss='sparse_categorical_crossentropy',
                          optimizer=optimizors(random_optimizor, lr),
                          metrics=['accuracy',
                                   km.sparse_categorical_precision(),
                                   km.sparse_categorical_recall(),
                                   km.sparse_categorical_f1_score(),
                                   km.sparse_categorical_true_positive(),
                                   km.sparse_categorical_true_negative(),
                                   km.sparse_categorical_false_positive(),
                                   km.sparse_categorical_false_negative()])
        else:
            model.compile(loss='categorical_crossentropy',
                          optimizer=optimizors(random_optimizor, lr),
                          metrics=['accuracy',
                                   km.categorical_precision(), km.categorical_recall(),
                                   km.categorical_f1_score(),
                                   km.categorical_true_positive(), km.categorical_true_negative(),
                                   km.categorical_false_positive(), km.categorical_false_negative()])
    return model, model_tmp

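# A hedged usage sketch (not from the original source): how one of the randomized builders
# above might be called and trained. The feature matrix x_train, the labels y_train, the
# `optimizors` helper and all hyper-parameter values shown here are placeholders/assumptions.
model, _ = Build_Model_DNN_Text(shape=x_train.shape[1],
                                number_of_classes=3,
                                sparse_categorical=True,
                                min_hidden_layer_dnn=1, max_hidden_layer_dnn=4,
                                min_nodes_dnn=64, max_nodes_dnn=512,
                                random_optimizor=True, dropout=0.5)
model.fit(x_train, y_train, epochs=10, batch_size=128, validation_split=0.1)
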
stopping = EarlyStopping(monitor='val_loss',
                         min_delta=0.003,
                         patience=1,
                         verbose=0,
                         mode='min',
                         baseline=None,
                         restore_best_weights=True)
callbacks_list = []

wcce = weighted_categorical_crossentropy(class_weights)
seg_model.compile(loss=wcce,
                  optimizer="Adam",
                  metrics=['accuracy', iou_coef,
                           keras_metrics.categorical_precision(label=0),
                           keras_metrics.categorical_recall(label=0),
                           keras_metrics.categorical_precision(label=1),
                           keras_metrics.categorical_recall(label=1),
                           keras_metrics.categorical_precision(label=2),
                           keras_metrics.categorical_recall(label=2)])

if (DATASET == dataset["cwfid"]):
    print_shapes(x_train, y_train, x_test, y_test)
    history = seg_model.fit(x_train, y_train,
                            batch_size=BATCH_SIZE,
                            epochs=NO_OF_EPOCHS,
                            verbose=1,
                            validation_data=(x_test, y_test),
                            shuffle=True,
                          beta_regularizer=None,
                          gamma_regularizer=None,
                          beta_constraint=None,
                          gamma_constraint=None))
model.add(Dense(units=3,
                activation='softmax',
                kernel_regularizer=regularizers.l2(regu)))
model.compile(loss='categorical_crossentropy',
              optimizer='adamax',  # controls how the learning rate is updated
              metrics=['accuracy',
                       km.categorical_precision(label=1),
                       km.categorical_recall(label=1)])

earlystopping = keras.callbacks.EarlyStopping(monitor='val_precision',
                                              min_delta=0,
                                              patience=50,
                                              verbose=1,
                                              mode='max',
                                              baseline=None)

# history = model.fit(New_array_reduced, one_hot_labels, epochs=3000, validation_split=Cross_s,
#                     batch_size=batch_size_value, verbose=0, class_weight=class_weights,
#                     callbacks=[earlystopping])

PARAMS = {
    'sequence_len': padding_len,
    'embedding_dim': 200,
    'epochs': 3,
    'batch_size': 256,
    'loss': 'categorical_crossentropy',
    'num_classes': len(util.get_categories()),
    'class_weights': None,
    'sampler': None,
    'k-folds': 4
}

PATH = DATA_PATH + 'models/' + NAME + '/'

DEPENDENCIES = {
    'categorical_recall': keras_metrics.categorical_recall(),
    'balanced_recall': util.balanced_recall,
    'SeqSelfAttention': SeqSelfAttention,
    'f1': util.f1
}


def load_model(path, extras={}):
    dependencies = {**DEPENDENCIES, **extras}
    return keras.models.load_model(path, custom_objects=dependencies)


def load_lastest(lang='pt', extras={}):
    if (len(os.listdir(PATH)) > 0):
        highest = (0, '')
        for file in os.listdir(PATH):
            if (file.startswith('weights') and file.endswith(lang + '.hdf5')):
                epoch = int(file.split('-')[1])

dummy_y_test = np_utils.to_categorical(test_labels)

model = Sequential()
model.add(Dense(111, input_dim=111, activation="relu"))
model.add(Dense(100, activation="sigmoid"))
model.add(Dense(500, activation="relu"))
model.add(Dense(100, activation="sigmoid"))
model.add(Dense(2, activation="softmax"))


def new_recall(y_true, y_pred):
    true_positives = K.sum(K.round(K.clip(y_true * y_pred, 0, 1)))
    possible_positives = K.sum(K.round(K.clip(y_true, 0, 1)))
    recall = true_positives / (possible_positives + K.epsilon())
    return recall


model.compile(loss="categorical_crossentropy", optimizer="adam",
              metrics=["accuracy", keras_metrics.categorical_recall()])
model.fit(train_features, dummy_y_train, epochs=10, verbose=1)

sum(dummy_y_train[:, 1])

predictions_train = model.predict_classes(train_features)
predictions_test = model.predict_classes(test_features)
probs_train = model.predict(train_features)
probs_test = model.predict(test_features)

conf_m_train_rf = sk.metrics.confusion_matrix(train_labels, predictions_train,
                                              labels=None, sample_weight=None)
print(conf_m_train_rf)
conf_m_test_rf = sk.metrics.confusion_matrix(test_labels, predictions_test,
                                             labels=None, sample_weight=None)

model.add(Dense(500, activation="relu"))
model.add(Dense(100, activation="sigmoid"))
model.add(Dense(2, activation="softmax"))


def new_recall(y_true, y_pred):
    true_positives = K.sum(K.round(K.clip(y_true * y_pred, 0, 1)))
    possible_positives = K.sum(K.round(K.clip(y_true, 0, 1)))
    recall = true_positives / (possible_positives + K.epsilon())
    return recall


model.compile(loss="categorical_crossentropy", optimizer="adam",
              metrics=["accuracy", keras_metrics.categorical_recall()])
model.fit(train_features, dummy_y_train, epochs=10, verbose=1)

sum(dummy_y_train[:, 1])

predictions_train = model.predict_classes(train_features)
predictions_test = model.predict_classes(test_features)
probs_train = model.predict(train_features)
probs_test = model.predict(test_features)

predictions_train
probs_train
sum(predictions_train)

def Build_Model_DNN_Image(shape, number_of_classes, sparse_categorical,
                          min_hidden_layer_dnn, max_hidden_layer_dnn,
                          min_nodes_dnn, max_nodes_dnn, random_optimizor, dropout):
    '''
    buildModel_DNN_image(shape, number_of_classes, sparse_categorical)
    Build a deep neural network model for image classification.
    shape is the input feature space,
    number_of_classes is the number of classes.
    '''
    model = Sequential()
    values = list(range(min_nodes_dnn, max_nodes_dnn))
    Numberof_NOde = random.choice(values)
    Lvalues = list(range(min_hidden_layer_dnn, max_hidden_layer_dnn))
    nLayers = random.choice(Lvalues)

    print(shape)
    model.add(Flatten(input_shape=shape))
    model.add(Dense(Numberof_NOde, activation='relu'))
    model.add(Dropout(dropout))
    for i in range(0, nLayers - 1):
        Numberof_NOde = random.choice(values)
        model.add(Dense(Numberof_NOde, activation='relu'))
        model.add(Dropout(dropout))

    if number_of_classes == 2:
        model.add(Dense(1, activation='sigmoid'))
        model_tmp = model
        model.compile(loss='binary_crossentropy',
                      optimizer=optimizors(random_optimizor),
                      metrics=['accuracy',
                               km.binary_precision(), km.binary_recall(),
                               km.binary_f1_score(),
                               km.binary_true_positive(), km.binary_true_negative(),
                               km.binary_false_positive(), km.binary_false_negative()])
    else:
        model.add(Dense(number_of_classes, activation='softmax'))
        model_tmp = model
        if sparse_categorical:
            model.compile(loss='sparse_categorical_crossentropy',
                          optimizer=optimizors(random_optimizor),
                          metrics=['accuracy',
                                   km.sparse_categorical_precision(),
                                   km.sparse_categorical_recall(),
                                   km.sparse_categorical_f1_score(),
                                   km.sparse_categorical_true_positive(),
                                   km.sparse_categorical_true_negative(),
                                   km.sparse_categorical_false_positive(),
                                   km.sparse_categorical_false_negative()])
        else:
            model.compile(loss='categorical_crossentropy',
                          optimizer=optimizors(random_optimizor),
                          metrics=['accuracy',
                                   km.categorical_precision(), km.categorical_recall(),
                                   km.categorical_f1_score(),
                                   km.categorical_true_positive(), km.categorical_true_negative(),
                                   km.categorical_false_positive(), km.categorical_false_negative()])
    return model, model_tmp

def __init__(self, opts, output_size=1, hidden_size=128):
    self.model = tf.keras.models.Sequential()
    loss = tf.keras.losses.CategoricalCrossentropy(from_logits=True,
                                                   reduction=tf.losses.Reduction.NONE)

    self.model = tf.keras.Sequential()
    self.model.add(tf.keras.Input(opts['input_shape']))
    self.model.add(tf.keras.layers.Reshape([1, opts['input_shape']]))
    self.model.add(tf.keras.layers.CuDNNLSTM(hidden_size))
    self.model.add(tf.keras.layers.Dropout(0.3))
    self.model.add(tf.keras.layers.Dense(32, activation='relu'))
    self.model.add(tf.keras.layers.Dense(output_size, activation='softmax'))
    self.model.compile(loss=loss,
                       optimizer='adam',
                       metrics=['accuracy',
                                km.categorical_precision(),
                                km.categorical_recall(),
                                km.categorical_f1_score()])

def __init__(self, opts, output_size=1, filter_length=50, hidden_size=128, kernel_size=2):
    self.model = tf.keras.models.Sequential()
    loss = tf.keras.losses.CategoricalCrossentropy(from_logits=True,
                                                   reduction=tf.losses.Reduction.NONE)

    self.model.add(tf.keras.Input(opts['input_shape']))
    self.model.add(tf.keras.layers.Reshape([1, opts['input_shape']]))
    self.model.add(tf.keras.layers.Conv1D(filter_length,
                                          kernel_size,
                                          padding='valid',
                                          activation='relu',
                                          strides=1,
                                          data_format='channels_first'))
    self.model.add(tf.keras.layers.GlobalMaxPooling1D(data_format='channels_first'))
    self.model.add(tf.keras.layers.Dense(hidden_size))
    self.model.add(tf.keras.layers.Dropout(0.2))
    self.model.add(tf.keras.layers.Dense(32, activation='relu'))
    self.model.add(tf.keras.layers.Dense(output_size, activation='softmax'))
    self.model.compile(loss=loss,
                       optimizer='adam',
                       metrics=['accuracy',
                                km.categorical_precision(),
                                km.categorical_recall(),
                                km.categorical_f1_score()])

def model_training(df_cov9, df_miami, m, fold, n, flag, strm):
    train = df_cov9[df_cov9['Fold number'] != fold]
    train = train.reset_index(drop=True)
    test = df_miami
    # test = df_cov9[df_cov9['Fold number'] == fold]
    test = test.reset_index(drop=True)
    print(list(test))

    temp, patients_vec_train, patients_label_train, Seq_len = training_data(train, strm)
    temp, patients_vec_test, patients_label_test, Seq_len_test, row = testing_data(test, strm)

    if max(Seq_len) > max(Seq_len_test):
        slen = max(Seq_len)
    else:
        slen = max(Seq_len_test)
    print('Slen' + str(slen))

    X_train_aug, y_train_aug = dataaugmentation(patients_vec_train, patients_label_train)

    X_train = pad_sequences(X_train_aug, slen, padding='pre', truncating='pre',
                            value=0, dtype='float32')
    Y_train = pad_sequences(y_train_aug, slen, padding='pre', truncating='pre', value=2.)
    X_test = pad_sequences(patients_vec_test, slen, padding='pre', truncating='pre',
                           value=0, dtype='float32')
    Y_test = pad_sequences(patients_label_test, slen, padding='pre', truncating='pre', value=2.)

    Y_categorical_train = k.utils.to_categorical(Y_train, 3)
    Y_categorical_train = Y_categorical_train.reshape(Y_train.shape[0], Y_train.shape[1], 3)
    Y_categorical_test = k.utils.to_categorical(Y_test, 3)
    Y_categorical_test = Y_categorical_test.reshape(Y_test.shape[0], Y_train.shape[1], 3)
    y_train = Y_categorical_train
    y_test = Y_categorical_test

    filepath = ("./weights/Miami" + str(m) +
                "monweights-improvement-{epoch:02d}-{val_precision_1:.3f}.h5py")
    checkpoint = ModelCheckpoint(filepath, monitor='val_precision_1', verbose=1,
                                 save_best_only=True, mode='max')
    callbacks_list = [checkpoint]

    num_features = X_test.shape[2]
    print('num features: ')
    print(num_features)

    model = create_model(slen, num_features, n)
    model.save('OCT_model.h5')
    print('Model saved!!')

    try:
        wei = list(Y_test.reshape(X_test.shape[0] * slen))
        print(len(wei))
        # Use a separate name so the sklearn `class_weight` module is not shadowed.
        balanced_weights = class_weight.compute_class_weight('balanced', np.unique(wei), wei)
        weights = np.array([balanced_weights[0], balanced_weights[1], balanced_weights[2]])
    except:
        weights = np.array([1, 50, 0.1])
    print(weights)

    loss = weighted_categorical_crossentropy(weights)

    if flag == 1:
        model.compile(loss=[categorical_focal_loss(alpha=.25, gamma=2)],
                      metrics=[km.categorical_precision(label=0),
                               km.categorical_precision(label=1),
                               km.categorical_recall(label=0),
                               km.categorical_recall(label=1)],
                      optimizer=optimizers.RMSprop(lr=0.00001, rho=0.9,
                                                   epsilon=1e-08, decay=1e-6))
    else:
        model.compile(loss=[categorical_focal_loss(alpha=.25, gamma=2)],
                      metrics=[km.categorical_precision(label=0),
                               km.categorical_precision(label=1),
                               km.categorical_recall(label=0),
                               km.categorical_recall(label=1)],
                      optimizer=optimizers.Adam(lr=0.001, decay=1e-6))

    history = model.fit(X_train, y_train,
                        batch_size=64,
                        epochs=100,
                        validation_data=(X_test, y_test),
                        callbacks=callbacks_list,
                        shuffle=True)

    list_of_files = glob.glob('./weights/*.h5py')  # * means all; use e.g. *.csv for a specific format
    latest_file = max(list_of_files, key=os.path.getctime)
    print(latest_file)

    bestmodel = create_model(slen, num_features, n)
    bestmodel.load_weights(latest_file)

    batch_size = 50
    preds_prob3mon = bestmodel.predict_proba(X_test, batch_size=batch_size)
    print(preds_prob3mon.shape)
    ind_preds3mon = preds_prob3mon.reshape(X_test.shape[0] * slen, 3)
    ind_Y_test3mon = y_test.reshape(X_test.shape[0] * slen, 3)

    fpr, tpr, thresholds = roc_curve(
        np.array(ind_Y_test3mon[ind_Y_test3mon[:, 2] == 0, 1]),
        np.array(ind_preds3mon[ind_Y_test3mon[:, 2] == 0, 1]))
    roc_auc = auc(fpr, tpr)
    lr_precision, lr_recall, _ = precision_recall_curve(
        np.array(ind_Y_test3mon[ind_Y_test3mon[:, 2] == 0, 1]),
        np.array(ind_preds3mon[ind_Y_test3mon[:, 2] == 0, 1]))
    lr_auc = auc(lr_recall, lr_precision)
    return fpr, tpr, roc_auc, ind_preds3mon, ind_Y_test3mon, lr_precision, lr_recall, lr_auc

def train_combined(network, images_dir, csv_dir, csv_data, merge_type, *args):
    """
    Trains a network combining a convolutional network and a multilayer perceptron
    on images and csv data.

    Arguments:
        network : string
            Name of an implemented CNN on the current keras version.
        images_dir : string
            Path to a directory with subdirs for each image class.
        csv_dir : string
            Path to a directory that contains train/val csv files.
    """
    # Extract parameters from args
    img_width, img_height, batch_size, lr_rate, epochs, models_dir, logs_dir, gpu_number = args

    # Get combined and image generators, and the number of features in the csv files.
    num_images_train, num_classes_train, features, multi_train_gen = get_combined_generator(
        network, images_dir, csv_dir, csv_data, 'train', img_width, img_height, batch_size)
    num_images_val, num_classes_val, features, multi_val_gen = get_combined_generator(
        network, images_dir, csv_dir, csv_data, 'val', img_width, img_height, batch_size)

    assert num_classes_train == num_classes_val

    # Create class weights, useful for imbalanced datasets.
    class_weights = None
    if num_classes_train == 8:
        class_weights = {0: 50, 1: 1, 2: 1, 3: 50, 4: 50, 5: 50, 6: 50, 7: 1}

    # Create model object in keras for both types of inputs.
    model, last_layer_number = get_csv_plus_image_model(
        network, num_classes_train, features, img_width, img_height, merge_type)

    # Use a multi-gpu model if available and configured.
    if gpu_number > 1:
        model = multi_gpu_model(model, gpus=gpu_number)

    # Create path to save training models and logs.
    top_weights_path = f'B_{merge_type}_{network}'

    # Compile model and set learning rate.
    model.compile(loss='categorical_crossentropy',
                  optimizer=Adadelta(lr=lr_rate),
                  metrics=['accuracy',
                           km.categorical_precision(),
                           km.categorical_recall()])

    # Get list of training parameters in keras.
    callback_list = get_callback_list(network, top_weights_path, models_dir, logs_dir)

    # Train the model on the train split, for half the epochs.
    model.fit_generator(multi_train_gen,
                        steps_per_epoch=num_images_train // batch_size,
                        epochs=epochs // 2,
                        validation_data=multi_val_gen,
                        validation_steps=num_images_val // batch_size,
                        class_weight=class_weights,
                        callbacks=callback_list,
                        use_multiprocessing=True)

    # Load the best model from the previous training phase.
    model.load_weights(f'{models_dir}/{network}/{top_weights_path}.h5')

    # After training for a few epochs, freeze the bottom layers and train only the last ones.
    if last_layer_number > 0:
        for layer in model.layers[:last_layer_number]:
            layer.trainable = False
        for layer in model.layers[last_layer_number:]:
            layer.trainable = True

    # Compile model with frozen layers, and set learning rate.
    model.compile(loss='categorical_crossentropy',
                  optimizer=Adadelta(lr=lr_rate),
                  metrics=['accuracy',
                           km.categorical_precision(),
                           km.categorical_recall()])

    # Get list of training parameters in keras.
    callback_list = get_callback_list(network, top_weights_path, models_dir, logs_dir,
                                      patience=30)

    # Train the model on the train split, for the second half of the epochs.
    model.fit_generator(multi_train_gen,
                        steps_per_epoch=num_images_train // batch_size,
                        epochs=epochs // 2,
                        validation_data=multi_val_gen,
                        validation_steps=num_images_val // batch_size,
                        class_weight=class_weights,
                        callbacks=callback_list,
                        use_multiprocessing=True)

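# A hedged usage sketch (not from the original source): how train_combined might be invoked.
# The network name, paths and hyper-parameter values below are placeholders; the trailing
# positional arguments must be ordered as (img_width, img_height, batch_size, lr_rate,
# epochs, models_dir, logs_dir, gpu_number), matching the unpacking at the top of the function.
train_combined('InceptionV3', 'data/images', 'data/csv', 'features.csv', 'concat',
               299, 299, 32, 1.0, 40, 'models', 'logs', 1)
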
    s.run(tf.compat.v1.global_variables_initializer())
    model.fit(train_images, train_labels,
              epochs=epochs,
              batch_size=batch_size,
              callbacks=[KafkaCallback(session_name)])
    test_loss, test_acc = model.evaluate(test_images, test_labels,
                                         verbose=2,
                                         callbacks=[KafkaCallback(session_name)])
    print('\nTest accuracy:', test_acc)


if __name__ == "__main__":
    METRICS = [
        'accuracy',
        km.categorical_precision(),
        km.categorical_recall(),
        km.categorical_true_positive(),
        km.categorical_true_negative(),
        km.categorical_false_positive(),
        km.categorical_false_negative(),
    ]
    LOSS = [
        (1, tf.keras.losses.BinaryCrossentropy(from_logits=True), 'Binary crossentropy'),
        (2, tf.keras.losses.CategoricalCrossentropy(from_logits=True), 'Categorical crossentropy'),
        (3, tf.keras.losses.CategoricalHinge(), 'Categorical hinge'),
        (4, tf.keras.losses.CosineSimilarity(), 'Cosine similarity'),
        (5, tf.keras.losses.Hinge(), 'Hinge'),
        (6, tf.keras.losses.Huber(), 'Huber'),
        (7, tf.keras.losses.SquaredHinge(), 'Squared hinge'),
        (8, tf.keras.losses.LogCosh(), 'Hyperbolic Cosine'),
        (9, tf.keras.losses.MeanAbsoluteError(), 'Mean absolute error'),
def apply_CNN():
    # Importing the Keras libraries and packages
    from keras.models import Sequential
    from keras.layers import Conv2D, BatchNormalization, Dropout
    from keras.layers import MaxPooling2D
    from keras.layers import Flatten
    from keras.layers import Dense
    # Imports for collecting metrics
    import keras_metrics as km
    import tensorflow as tf
    # import tensorflow.keras as keras

    # Initialising the CNN
    classifier = Sequential()

    # Step 1 - Convolution (Keras 2 layer/argument names; the original used the Keras 1 style
    # Convolution2D(32, 3, 3, ...) and Dense(output_dim=...))
    classifier.add(Conv2D(32, (3, 3), input_shape=(64, 64, 3), activation='relu'))
    classifier.add(BatchNormalization())

    # Step 2 - Pooling
    classifier.add(MaxPooling2D(pool_size=(2, 2), strides=(2, 2)))
    classifier.add(Dropout(0.2))

    # Adding a second convolutional layer
    classifier.add(Conv2D(32, (3, 3), activation='relu'))
    classifier.add(BatchNormalization())
    classifier.add(MaxPooling2D(pool_size=(2, 2), strides=(2, 2)))
    classifier.add(Dropout(0.5))

    classifier.add(Flatten())

    # Step 4 - Full connection
    classifier.add(Dense(128, activation='relu'))
    classifier.add(BatchNormalization())
    classifier.add(Dropout(0.2))
    classifier.add(Dense(3, activation='softmax'))  # categorical output

    # Set metrics
    precision = km.categorical_precision()
    recall = km.categorical_recall()
    f1 = km.categorical_f1_score()
    classifier.compile(optimizer='adam',
                       loss='categorical_crossentropy',
                       metrics=['accuracy', precision, recall, f1])

    # Part 2 - Fitting the CNN to the images
    from keras.preprocessing.image import ImageDataGenerator
    train_datagen = ImageDataGenerator()
    test_datagen = ImageDataGenerator()
    seed = 7
    training_set = train_datagen.flow_from_directory(
        'training',
        target_size=(64, 64),
        batch_size=32,
        class_mode='categorical',  # 'categorical' or 'binary'
        shuffle=True,
        seed=seed)  # , save_to_dir='generatedimages')
    test_set = test_datagen.flow_from_directory(
        'test',
        target_size=(64, 64),
        batch_size=32,
        class_mode='categorical',  # 'categorical' or 'binary'
        shuffle=True,
        seed=seed)

    with tf.Session() as s:
        s.run(tf.global_variables_initializer())
        # Keras 2 argument names (the original used samples_per_epoch / nb_epoch / nb_val_samples).
        classifier.fit_generator(training_set,
                                 steps_per_epoch=250,
                                 epochs=35,
                                 validation_data=test_set,
                                 validation_steps=90,
                                 shuffle=True,
                                 verbose=2)
    return